clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SIISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Analysis -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ASMParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/BinaryFormat -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitcode -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Bitstream -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /include/llvm/CodeGen -I /include/llvm/CodeGen/PBQP -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IR -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Coroutines -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData/Coverage -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/CodeView -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/DWARF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/MSF -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/PDB -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Demangle -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/JITLink -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ExecutionEngine/Orc -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenACC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Frontend/OpenMP -I /include/llvm/CodeGen/GlobalISel -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/IRReader -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/Transforms/InstCombine -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/LTO -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Linker -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/MC/MCParser -I /include/llvm/CodeGen/MIRParser -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Object -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Option -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Passes -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ProfileData -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Scalar -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/ADT -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Support -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/DebugInfo/Symbolize -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Target -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Utils -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/Vectorize -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include/llvm/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/X86 -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms -I /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include/llvm/Transforms/IPO -I 
/usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libLLVM/../include -I /usr/src/gnu/usr.bin/clang/libLLVM/obj -I /usr/src/gnu/usr.bin/clang/libLLVM/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libLLVM/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libLLVM/../../../llvm/llvm/lib/Target/AMDGPU/SIISelLowering.cpp

//===-- SIISelLowering.cpp - SI DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> DisableLoopAlignment(
    "amdgpu-disable-loop-alignment",
    cl::desc("Do not align and prefetch loops"),
    cl::init(false));

static cl::opt<bool> VGPRReserveforSGPRSpill(
    "amdgpu-reserve-vgpr-for-sgpr-spill",
    cl::desc("Allocates one VGPR for future SGPR Spill"), cl::init(true));

static cl::opt<bool> UseDivergentRegisterIndexing(
    "amdgpu-use-divergent-register-indexing",
    cl::Hidden,
    cl::desc("Use indirect register addressing for divergent indexes"),
    cl::init(false));

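// Helpers for querying the function-wide floating-point denormal mode
// recorded in SIMachineFunctionInfo.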
static bool hasFP32Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP32Denormals();
}

static bool hasFP64FP16Denormals(const MachineFunction &MF) {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return Info->getMode().allFP64FP16Denormals();
}

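// Return the first SGPR in the SGPR_32 class that CCInfo has not yet
// allocated; aborts if every SGPR is taken.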
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);

  const SIRegisterInfo *TRI = STI.getRegisterInfo();
  const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class();

  addRegisterClass(MVT::f64, V64RegClass);
  addRegisterClass(MVT::v2f32, V64RegClass);

  addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
  addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));

  addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
  addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));

  addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
  addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));

  addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
  addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));

  addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
  addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));

  addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
  addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));

  addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
  addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));

  addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
  addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);

    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
  addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));

  computeRegisterProperties(Subtarget->getRegisterInfo());

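  // Boolean values are represented as 0 or 1, for both scalar and vector
  // conditions.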
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);

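  // i1 and wide vector loads/stores need custom handling (they are split or
  // widened during lowering).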
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v3i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v5i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v6i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v7i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::LOAD, MVT::v32i32, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v3i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v5i32, Custom);
  setOperationAction(ISD::STORE, MVT::v6i32, Custom);
  setOperationAction(ISD::STORE, MVT::v7i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::v32i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v3i32, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);
  setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i16, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i16, MVT::v32i8, Expand);

  setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
  setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v8i64, MVT::v8i32, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v3i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v3f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v4i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v4f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v5i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v5f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v6i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v6f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v7i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v7f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v8i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v8f32, Expand);
  setOperationAction(ISD::TRUNCATE, MVT::v16i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v16f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v3i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);

#if 0
  setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif

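  // On these wide vector types only load/store and the basic
  // vector-manipulation operations are supported; everything else is expanded,
  // with subvector insertion and concatenation custom-lowered.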
  for (MVT VT : { MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                  MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16,
                  MVT::v3i64, MVT::v3f64, MVT::v6i32, MVT::v6f32,
                  MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
                  MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32 }) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::INSERT_SUBVECTOR:
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand);

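  // Promote build/extract/insert operations on 64-bit element vectors to the
  // 32-bit integer vector type of the same total width.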
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  for (MVT Vec64 : { MVT::v3i64, MVT::v3f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v6i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v6i32);
  }

  for (MVT Vec64 : { MVT::v4i64, MVT::v4f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v8i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v8i32);
  }

  for (MVT Vec64 : { MVT::v8i64, MVT::v8f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v16i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v16i32);
  }

  for (MVT Vec64 : { MVT::v16i64, MVT::v16f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v32i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v32i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

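  // Custom-lower element access on the small packed 8-bit and 16-bit vector
  // types, which would otherwise go through the stack.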
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

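  // Subvector insertion into vec3/vec4 (and the wider vec5-vec8 forms below)
  // is custom-lowered.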
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);

  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v6f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v7f32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom);

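  // Compare-and-swap is custom-lowered to match the operand layout of the
  // buffer/flat compare-swap instructions.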
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

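  // The hardware returns only the old value, so expand the with-success form
  // and let LLVM synthesize the success comparison.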
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  setOperationAction(ISD::BSWAP, MVT::i64, Legal);
  setOperationAction(ISD::BSWAP, MVT::i32, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FEXP, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  if (Subtarget->hasMadMacF32Insts())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  if (Subtarget->hasIntClamp()) {
    setOperationAction(ISD::UADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::USUBSAT, MVT::i32, Legal);
  }

  if (Subtarget->hasAddNoCarry()) {
    setOperationAction(ISD::SADDSAT, MVT::i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  setOperationAction(ISD::FMINNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Custom);
  setOperationAction(ISD::FMINNUM, MVT::f64, Custom);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Custom);

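  // The IEEE variants of min/max match the hardware instruction semantics in
  // IEEE mode, so they can be marked legal.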
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);

  if (Subtarget->haveRoundOpsF64()) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  } else {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Expand);
    setOperationAction(ISD::ROTL, MVT::i16, Expand);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);
    setOperationAction(ISD::UADDSAT, MVT::i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::i16, Legal);

    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTPOP, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Custom);

    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Custom);
    setOperationAction(ISD::FSIN, MVT::f16, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);

    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (STI.hasMadF16())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);

    for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    setOperationAction(ISD::BSWAP, MVT::i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v2i16, Legal);
    setOperationAction(ISD::BSWAP, MVT::v4i16, Custom);

    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::UNDEF, MVT::v2i16, Legal);
    setOperationAction(ISD::UNDEF, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::LOAD, MVT::v4f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::STORE, MVT::v4i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
    setOperationAction(ISD::STORE, MVT::v4f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32);

    setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand);

    if (!Subtarget->hasVOP3PInsts()) {
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
    }

    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);

    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::FMAXNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v4f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Expand);
  }

  if (Subtarget->hasVOP3PInsts()) {
    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::UADDSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v2i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);

    setOperationAction(ISD::FMINNUM_IEEE, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::v2f16, Legal);

    setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);

    setOperationAction(ISD::SHL, MVT::v4i16, Custom);
    setOperationAction(ISD::SRA, MVT::v4i16, Custom);
    setOperationAction(ISD::SRL, MVT::v4i16, Custom);
    setOperationAction(ISD::ADD, MVT::v4i16, Custom);
    setOperationAction(ISD::SUB, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);

    setOperationAction(ISD::SMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::SMAX, MVT::v4i16, Custom);
    setOperationAction(ISD::UMIN, MVT::v4i16, Custom);
    setOperationAction(ISD::UMAX, MVT::v4i16, Custom);

    setOperationAction(ISD::UADDSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::SADDSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v4i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::v4i16, Custom);

    setOperationAction(ISD::FADD, MVT::v4f16, Custom);
    setOperationAction(ISD::FMUL, MVT::v4f16, Custom);
    setOperationAction(ISD::FMA, MVT::v4f16, Custom);

    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
    setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);

    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f16, Custom);

    if (Subtarget->hasPackedFP32Ops()) {
      setOperationAction(ISD::FADD, MVT::v2f32, Legal);
      setOperationAction(ISD::FMUL, MVT::v2f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f32, Legal);

      for (MVT VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32 }) {
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
      }
    }
  }

  setOperationAction(ISD::FNEG, MVT::v4f16, Custom);
  setOperationAction(ISD::FABS, MVT::v4f16, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);
  } else {
    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);

    setOperationAction(ISD::FNEG, MVT::v2f16, Custom);
    setOperationAction(ISD::FABS, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v3i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v8f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v3i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v3f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v4i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::f16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);

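  // Nodes routed through the target-specific DAG combiner.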
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::FMINNUM_IEEE);
  setTargetDAGCombine(ISD::FMAXNUM_IEEE);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

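  // All memory operation types, including the read-modify-write atomics, also
  // go through the target combiner.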
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setStackPointerRegisterToSaveRestore(AMDGPU::SGPR32);

  setSchedulingPreference(Sched::RegPressure);
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

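//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//
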
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 &&
         SrcVT.getScalarType() == MVT::f16 &&
         !hasFP32Denormals(DAG.getMachineFunction());
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 16) {
      if (Subtarget->has16BitInsts())
        return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      return VT.isInteger() ? MVT::i32 : MVT::f32;
    }

    if (Size < 16)
      return Subtarget->has16BitInsts() ? MVT::i16 : MVT::i32;
    return Size == 32 ? ScalarVT.getSimpleVT() : MVT::i32;
  }

  if (VT.getSizeInBits() > 32)
    return MVT::i32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC == CallingConv::AMDGPU_KERNEL)
    return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);

  if (VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

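    // Packed 16-bit elements pair up, two per 32-bit register.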
    if (Size == 16 && Subtarget->has16BitInsts())
      return (NumElts + 1) / 2;

    if (Size <= 32)
      return NumElts;

    if (Size > 32)
      return NumElts * ((Size + 31) / 32);
  } else if (VT.getSizeInBits() > 32)
    return (VT.getSizeInBits() + 31) / 32;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC,
    EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

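    // Two 16-bit elements pack into each 32-bit intermediate register.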
    if (Size == 16 && Subtarget->has16BitInsts()) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = (NumElts + 1) / 2;
      return NumIntermediates;
    }

    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size < 16 && Subtarget->has16BitInsts()) {
      RegisterVT = MVT::i16;
      IntermediateVT = ScalarVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size != 16 && Size <= 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = ScalarVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size > 32) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts * ((Size + 31) / 32);
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

static EVT memVTFromImageData(Type *Ty, unsigned DMaskLanes) {
  assert(DMaskLanes != 0);

  if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
    unsigned NumElts = std::min(DMaskLanes, VT->getNumElements());
    return EVT::getVectorVT(Ty->getContext(),
                            EVT::getEVT(VT->getElementType()),
                            NumElts);
  }

  return EVT::getEVT(Ty);
}

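// Look through a struct return of the form { data, i32 } and compute the
// memory type from the data member only.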
static EVT memVTFromImageReturn(Type *Ty, unsigned DMaskLanes) {
  auto *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return memVTFromImageData(Ty, DMaskLanes);

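  // Only the two-member form { data, i32 } is expected here; reject anything
  // else.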
  if (ST->getNumContainedTypes() != 2 ||
      !ST->getContainedType(1)->isIntegerTy(32))
    return EVT();
  return memVTFromImageData(ST->getContainedType(0), DMaskLanes);
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
          AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal =
          MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
      Info.align.reset();
    } else {
      Info.ptrVal =
          MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo());
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      unsigned DMaskLanes = 4;

      if (RsrcIntr->IsImage) {
        const AMDGPU::ImageDimIntrinsicInfo *Intr
            = AMDGPU::getImageDimIntrinsicInfo(IntrID);
        const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
            AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

        if (!BaseOpcode->Gather4) {
          unsigned DMask
              = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue();
          DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
        }

        Info.memVT = memVTFromImageReturn(CI.getType(), DMaskLanes);
      } else
        Info.memVT = EVT::getEVT(CI.getType());

      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;

      Type *DataTy = CI.getArgOperand(0)->getType();
      if (RsrcIntr->IsImage) {
        unsigned DMask = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
        unsigned DMaskLanes = DMask == 0 ? 1 : countPopulation(DMask);
        Info.memVT = memVTFromImageData(DataTy, DMaskLanes);
      } else
        Info.memVT = EVT::getEVT(DataTy);

      Info.flags |= MachineMemOperand::MOStore;
    } else {
      Info.opc = CI.getType()->isVoidTy() ? ISD::INTRINSIC_VOID :
                                            ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

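      // The remaining resource intrinsics are read-write; conservatively mark
      // them volatile since their ordering requirements are unknown here.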
1125 | Info.flags |= MachineMemOperand::MOVolatile; |
1126 | } |
1127 | return true; |
1128 | } |
1129 | |
1130 | switch (IntrID) { |
1131 | case Intrinsic::amdgcn_atomic_inc: |
1132 | case Intrinsic::amdgcn_atomic_dec: |
1133 | case Intrinsic::amdgcn_ds_ordered_add: |
1134 | case Intrinsic::amdgcn_ds_ordered_swap: |
1135 | case Intrinsic::amdgcn_ds_fadd: |
1136 | case Intrinsic::amdgcn_ds_fmin: |
1137 | case Intrinsic::amdgcn_ds_fmax: { |
1138 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1139 | Info.memVT = MVT::getVT(CI.getType()); |
1140 | Info.ptrVal = CI.getOperand(0); |
1141 | Info.align.reset(); |
1142 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
1143 | |
1144 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(4)); |
1145 | if (!Vol->isZero()) |
1146 | Info.flags |= MachineMemOperand::MOVolatile; |
1147 | |
1148 | return true; |
1149 | } |
1150 | case Intrinsic::amdgcn_buffer_atomic_fadd: { |
1151 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1152 | |
1153 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1154 | Info.memVT = MVT::getVT(CI.getOperand(0)->getType()); |
1155 | Info.ptrVal = |
1156 | MFI->getBufferPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
1157 | Info.align.reset(); |
1158 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
1159 | |
1160 | const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); |
1161 | if (!Vol || !Vol->isZero()) |
1162 | Info.flags |= MachineMemOperand::MOVolatile; |
1163 | |
1164 | return true; |
1165 | } |
1166 | case Intrinsic::amdgcn_ds_append: |
1167 | case Intrinsic::amdgcn_ds_consume: { |
1168 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1169 | Info.memVT = MVT::getVT(CI.getType()); |
1170 | Info.ptrVal = CI.getOperand(0); |
1171 | Info.align.reset(); |
1172 | Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; |
1173 | |
1174 | const ConstantInt *Vol = cast<ConstantInt>(CI.getOperand(1)); |
1175 | if (!Vol->isZero()) |
1176 | Info.flags |= MachineMemOperand::MOVolatile; |
1177 | |
1178 | return true; |
1179 | } |
1180 | case Intrinsic::amdgcn_global_atomic_csub: { |
1181 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1182 | Info.memVT = MVT::getVT(CI.getType()); |
1183 | Info.ptrVal = CI.getOperand(0); |
1184 | Info.align.reset(); |
1185 | Info.flags = MachineMemOperand::MOLoad | |
1186 | MachineMemOperand::MOStore | |
1187 | MachineMemOperand::MOVolatile; |
1188 | return true; |
1189 | } |
1190 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { |
1191 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1192 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1193 | Info.memVT = MVT::getVT(CI.getType()); |
1194 | Info.ptrVal = |
1195 | MFI->getImagePSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
1196 | Info.align.reset(); |
1197 | Info.flags = MachineMemOperand::MOLoad | |
1198 | MachineMemOperand::MODereferenceable; |
1199 | return true; |
1200 | } |
1201 | case Intrinsic::amdgcn_global_atomic_fadd: |
1202 | case Intrinsic::amdgcn_global_atomic_fmin: |
1203 | case Intrinsic::amdgcn_global_atomic_fmax: |
1204 | case Intrinsic::amdgcn_flat_atomic_fadd: |
1205 | case Intrinsic::amdgcn_flat_atomic_fmin: |
1206 | case Intrinsic::amdgcn_flat_atomic_fmax: { |
1207 | Info.opc = ISD::INTRINSIC_W_CHAIN; |
1208 | Info.memVT = MVT::getVT(CI.getType()); |
1209 | Info.ptrVal = CI.getOperand(0); |
1210 | Info.align.reset(); |
1211 | Info.flags = MachineMemOperand::MOLoad | |
1212 | MachineMemOperand::MOStore | |
1213 | MachineMemOperand::MODereferenceable | |
1214 | MachineMemOperand::MOVolatile; |
1215 | return true; |
1216 | } |
1217 | case Intrinsic::amdgcn_ds_gws_init: |
1218 | case Intrinsic::amdgcn_ds_gws_barrier: |
1219 | case Intrinsic::amdgcn_ds_gws_sema_v: |
1220 | case Intrinsic::amdgcn_ds_gws_sema_br: |
1221 | case Intrinsic::amdgcn_ds_gws_sema_p: |
1222 | case Intrinsic::amdgcn_ds_gws_sema_release_all: { |
1223 | Info.opc = ISD::INTRINSIC_VOID; |
1224 | |
1225 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
1226 | Info.ptrVal = |
1227 | MFI->getGWSPSV(*MF.getSubtarget<GCNSubtarget>().getInstrInfo()); |
1228 | |
1229 | // This is an abstract access, but we need to specify a type and size. |
1230 | Info.memVT = MVT::i32; |
1231 | Info.size = 4; |
1232 | Info.align = Align(4); |
1233 | |
1234 | Info.flags = MachineMemOperand::MOStore; |
1235 | if (IntrID == Intrinsic::amdgcn_ds_gws_barrier) |
1236 | Info.flags = MachineMemOperand::MOLoad; |
1237 | return true; |
1238 | } |
1239 | default: |
1240 | return false; |
1241 | } |
1242 | } |
1243 | |
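| // Collect the pointer operand and access type for intrinsics that read or |
| // write memory, so addressing-mode matching can try to fold offsets into |
| // the access. Only the DS/flat/global atomics listed below participate. |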
1244 | bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II, |
1245 | SmallVectorImpl<Value*> &Ops, |
1246 | Type *&AccessTy) const { |
1247 | switch (II->getIntrinsicID()) { |
1248 | case Intrinsic::amdgcn_atomic_inc: |
1249 | case Intrinsic::amdgcn_atomic_dec: |
1250 | case Intrinsic::amdgcn_ds_ordered_add: |
1251 | case Intrinsic::amdgcn_ds_ordered_swap: |
1252 | case Intrinsic::amdgcn_ds_append: |
1253 | case Intrinsic::amdgcn_ds_consume: |
1254 | case Intrinsic::amdgcn_ds_fadd: |
1255 | case Intrinsic::amdgcn_ds_fmin: |
1256 | case Intrinsic::amdgcn_ds_fmax: |
1257 | case Intrinsic::amdgcn_global_atomic_fadd: |
1258 | case Intrinsic::amdgcn_flat_atomic_fadd: |
1259 | case Intrinsic::amdgcn_flat_atomic_fmin: |
1260 | case Intrinsic::amdgcn_flat_atomic_fmax: |
1261 | case Intrinsic::amdgcn_global_atomic_csub: { |
1262 | Value *Ptr = II->getArgOperand(0); |
1263 | AccessTy = II->getType(); |
1264 | Ops.push_back(Ptr); |
1265 | return true; |
1266 | } |
1267 | default: |
1268 | return false; |
1269 | } |
1270 | } |
1271 | |
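| // Test whether an addressing mode of the form base + constant offset can |
| // be folded into a FLAT instruction; scaled indices are never supported. |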
1272 | bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { |
1273 | if (!Subtarget->hasFlatInstOffsets()) { |
1274 | // Flat instructions do not have offsets, and only have the register |
1275 | // address. |
1276 | return AM.BaseOffs == 0 && AM.Scale == 0; |
1277 | } |
1278 | |
1279 | return AM.Scale == 0 && |
1280 | (AM.BaseOffs == 0 || |
1281 | Subtarget->getInstrInfo()->isLegalFLATOffset( |
1282 | AM.BaseOffs, AMDGPUAS::FLAT_ADDRESS, SIInstrFlags::FLAT)); |
1283 | } |
1284 | |
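| // Global accesses are lowered to FLAT-global, plain FLAT, or MUBUF |
| // instructions depending on the subtarget, so check the matching encoding. |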
1285 | bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { |
1286 | if (Subtarget->hasFlatGlobalInsts()) |
1287 | return AM.Scale == 0 && |
1288 | (AM.BaseOffs == 0 || Subtarget->getInstrInfo()->isLegalFLATOffset( |
1289 | AM.BaseOffs, AMDGPUAS::GLOBAL_ADDRESS, |
1290 | SIInstrFlags::FlatGlobal)); |
1291 | |
1292 | if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) { |
1293 | // Assume we will use FLAT for all global memory accesses on VI. |
1294 | |
1295 | // FIXME: This assumption is currently wrong. On VI we still use |
1296 | // MUBUF instructions for the r + i addressing mode. As currently |
1297 | // implemented, the MUBUF instructions can only be used for addressing |
1298 | // modes that result in the offset being 0 (i.e. the base register is |
1299 | // the offset register). Because of this, it only makes sense to use |
1300 | // FLAT instructions here if the immediate offset is always 0. |
1301 | |
1302 | return isLegalFlatAddressingMode(AM); |
1303 | } |
1304 | |
1305 | return isLegalMUBUFAddressingMode(AM); |
1306 | } |
1307 | |
1308 | bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { |
1309 | // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and |
1310 | // additionally can do r + r + i with addr64. 32-bit has more addressing |
1311 | // mode options. Depending on the resource constant, it can also do |
1312 | // (i64 r0) + (i32 r1) * (i14 i). |
1313 | // |
1314 | // Private arrays end up using a scratch buffer most of the time, so also |
1315 | // assume those use MUBUF instructions. Scratch loads / stores are |
1316 | // currently implemented as mubuf instructions with offen bit set, so |
1317 | // slightly different than the normal addr64. |
1318 | if (!SIInstrInfo::isLegalMUBUFImmOffset(AM.BaseOffs)) |
1319 | return false; |
1320 | |
1321 | // FIXME: Since we can split the immediate into soffset and an immediate |
1322 | // offset, would it make sense to allow any immediate? |
1323 | |
1324 | switch (AM.Scale) { |
1325 | case 0: |
1326 | return true; |
1327 | case 1: |
1328 | return true; |
1329 | case 2: |
1330 | if (AM.HasBaseReg) { |
1331 | // Reject 2 * r + r. |
1332 | return false; |
1333 | } |
1334 | |
1335 | // Allow 2 * r as r + r, |
1336 | // or  2 * r + i as r + r + i. |
1337 | return true; |
1338 | default: |
1339 | return false; |
1340 | } |
1341 | } |
1342 | |
1343 | bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, |
1344 | const AddrMode &AM, Type *Ty, |
1345 | unsigned AS, Instruction *I) const { |
1346 | // No global is ever allowed as a base. |
1347 | if (AM.BaseGV) |
1348 | return false; |
1349 | |
1350 | if (AS == AMDGPUAS::GLOBAL_ADDRESS) |
1351 | return isLegalGlobalAddressingMode(AM); |
1352 | |
1353 | if (AS == AMDGPUAS::CONSTANT_ADDRESS || |
1354 | AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || |
1355 | AS == AMDGPUAS::BUFFER_FAT_POINTER) { |
1356 | // If the offset isn't a multiple of 4, it probably isn't going to be |
1357 | // correctly aligned. |
1358 | // FIXME: Can we get the real alignment here? |
1359 | if (AM.BaseOffs % 4 != 0) |
1360 | return isLegalMUBUFAddressingMode(AM); |
1361 | |
1362 | // There are no SMRD extloads, so if we have to do a small type access we |
1363 | // will use a MUBUF load. |
1364 | // FIXME?: We also need to do this if unaligned, but we don't know the |
1365 | // alignment here. |
1366 | if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) |
1367 | return isLegalGlobalAddressingMode(AM); |
1368 | |
1369 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { |
1370 | // SMRD instructions have an 8-bit, dword offset on SI. |
1371 | if (!isUInt<8>(AM.BaseOffs / 4)) |
1372 | return false; |
1373 | } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { |
1374 | // On CI+, this can also be a 32-bit literal constant offset. If it fits |
1375 | // in 8-bits, it can use a smaller encoding. |
1376 | if (!isUInt<32>(AM.BaseOffs / 4)) |
1377 | return false; |
1378 | } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { |
1379 | // On VI, these use the SMEM format and the offset is 20-bit in bytes. |
1380 | if (!isUInt<20>(AM.BaseOffs)) |
1381 | return false; |
1382 | } else |
1383 | llvm_unreachable("unhandled generation"); |
1384 | |
1385 | if (AM.Scale == 0) |
1386 | return true; |
1387 | |
1388 | if (AM.Scale == 1 && AM.HasBaseReg) |
1389 | return true; |
1390 | |
1391 | return false; |
1392 | |
1393 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
1394 | return isLegalMUBUFAddressingMode(AM); |
1395 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || |
1396 | AS == AMDGPUAS::REGION_ADDRESS) { |
1397 | // Basic, single offset DS instructions allow a 16-bit unsigned immediate |
1398 | // field. |
1399 | // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have |
1400 | // an 8-bit dword offset. |
1401 | if (!isUInt<16>(AM.BaseOffs)) |
1402 | return false; |
1403 | |
1404 | if (AM.Scale == 0) |
1405 | return true; |
1406 | |
1407 | if (AM.Scale == 1 && AM.HasBaseReg) |
1408 | return true; |
1409 | |
1410 | return false; |
1411 | } else if (AS == AMDGPUAS::FLAT_ADDRESS || |
1412 | AS == AMDGPUAS::UNKNOWN_ADDRESS_SPACE) { |
1413 | // For an unknown address space, this usually means that this is for some |
1414 | // reason being used for pure arithmetic, and not based on some addressing |
1415 | // computation. We don't have instructions that compute pointers with any |
1416 | // addressing modes, so treat them as having no offset like flat |
1417 | // instructions. |
1418 | return isLegalFlatAddressingMode(AM); |
1419 | } |
1420 | |
1421 | // Assume a user alias of global for unknown address spaces. |
1422 | return isLegalGlobalAddressingMode(AM); |
1423 | } |
1424 | |
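| // Cap merged store width at what one instruction can write: four dwords |
| // for global/flat, two dwords for LDS/GDS, and the subtarget's maximum |
| // private element size for scratch. |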
1425 | bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT, |
1426 | const SelectionDAG &DAG) const { |
1427 | if (AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) { |
1428 | return (MemVT.getSizeInBits() <= 4 * 32); |
1429 | } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) { |
1430 | unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize(); |
1431 | return (MemVT.getSizeInBits() <= MaxPrivateBits); |
1432 | } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) { |
1433 | return (MemVT.getSizeInBits() <= 2 * 32); |
1434 | } |
1435 | return true; |
1436 | } |
1437 | |
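| // Size-based legality (and speed, via *IsFast) of unaligned accesses for |
| // each address space, honoring the subtarget's unaligned-access features |
| // and known hardware bugs. |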
1438 | bool SITargetLowering::allowsMisalignedMemoryAccessesImpl( |
1439 | unsigned Size, unsigned AddrSpace, Align Alignment, |
1440 | MachineMemOperand::Flags Flags, bool *IsFast) const { |
1441 | if (IsFast) |
1442 | *IsFast = false; |
1443 | |
1444 | if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
1445 | AddrSpace == AMDGPUAS::REGION_ADDRESS) { |
1446 | // Check if alignment requirements for ds_read/write instructions are |
1447 | // disabled. |
1448 | if (Subtarget->hasUnalignedDSAccessEnabled() && |
1449 | !Subtarget->hasLDSMisalignedBug()) { |
1450 | if (IsFast) |
1451 | *IsFast = Alignment != Align(2); |
1452 | return true; |
1453 | } |
1454 | |
1455 | |
1456 | // Either the alignment requirements are "enabled", or there is an |
1457 | // unaligned LDS access related hardware bug though alignment requirements |
1458 | // are "disabled". In either case, we need to check for proper alignment |
1459 | // requirements. |
1460 | if (Size == 64) { |
1461 | // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte |
1462 | // aligned, 8 byte access in a single operation using ds_read2/write2_b32 |
1463 | // with adjacent offsets. |
1464 | bool AlignedBy4 = Alignment >= Align(4); |
1465 | if (IsFast) |
1466 | *IsFast = AlignedBy4; |
1467 | |
1468 | return AlignedBy4; |
1469 | } |
1470 | if (Size == 96) { |
1471 | // ds_read/write_b96 require 16-byte alignment on gfx8 and older, so |
1472 | // only a 16-byte aligned access is treated as legal and fast here. |
1473 | bool AlignedBy16 = Alignment >= Align(16); |
1474 | if (IsFast) |
1475 | *IsFast = AlignedBy16; |
1476 | |
1477 | return AlignedBy16; |
1478 | } |
1479 | if (Size == 128) { |
1480 | // ds_read/write_b128 require 16-byte alignment on gfx8 and older, but we |
1481 | // can do a 8 byte aligned, 16 byte access in a single operation using |
1482 | // ds_read2/write2_b64. |
1483 | bool AlignedBy8 = Alignment >= Align(8); |
1484 | if (IsFast) |
1485 | *IsFast = AlignedBy8; |
1486 | |
1487 | return AlignedBy8; |
1488 | } |
1489 | } |
1490 | |
1491 | if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) { |
1492 | bool AlignedBy4 = Alignment >= Align(4); |
1493 | if (IsFast) |
1494 | *IsFast = AlignedBy4; |
1495 | |
1496 | return AlignedBy4 || |
1497 | Subtarget->enableFlatScratch() || |
1498 | Subtarget->hasUnalignedScratchAccess(); |
1499 | } |
1500 | |
1501 | // FIXME: We have to be conservative here and assume that flat operations |
1502 | // will access scratch. If we had access to the IR function, then we |
1503 | // could determine if any private memory was used in the function. |
1504 | if (AddrSpace == AMDGPUAS::FLAT_ADDRESS && |
1505 | !Subtarget->hasUnalignedScratchAccess()) { |
1506 | bool AlignedBy4 = Alignment >= Align(4); |
1507 | if (IsFast) |
1508 | *IsFast = AlignedBy4; |
1509 | |
1510 | return AlignedBy4; |
1511 | } |
1512 | |
1513 | if (Subtarget->hasUnalignedBufferAccessEnabled() && |
1514 | !(AddrSpace == AMDGPUAS::LOCAL_ADDRESS || |
1515 | AddrSpace == AMDGPUAS::REGION_ADDRESS)) { |
1516 | // If we have a uniform constant load, it still requires using a slow |
1517 | // buffer instruction if unaligned. |
1518 | if (IsFast) { |
1519 | // Accesses can really be issued as 1-byte aligned or 4-byte aligned, so |
1520 | // 2-byte alignment is worse than 1 unless doing a 2-byte access. |
1521 | *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS || |
1522 | AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT) ? |
1523 | Alignment >= Align(4) : Alignment != Align(2); |
1524 | } |
1525 | |
1526 | return true; |
1527 | } |
1528 | |
1529 | // Smaller than dword value must be aligned. |
1530 | if (Size < 32) |
1531 | return false; |
1532 | |
1533 | // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the |
1534 | // byte-address are ignored, thus forcing Dword alignment. |
1535 | // This applies to private, global, and constant memory. |
1536 | if (IsFast) |
1537 | *IsFast = true; |
1538 | |
1539 | return Size >= 32 && Alignment >= Align(4); |
1540 | } |
1541 | |
1542 | bool SITargetLowering::allowsMisalignedMemoryAccesses( |
1543 | EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, |
1544 | bool *IsFast) const { |
1545 | if (IsFast) |
1546 | *IsFast = false; |
1547 | |
1548 | // MVT::Other carries no size information, and types wider than 1024 bits |
1549 | // with a store size over 16 bytes are not handled by the per-size checks |
1550 | // in the implementation above, so reject both up front. |
1551 | |
1552 | if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 && |
1553 | VT.getStoreSize() > 16)) { |
1554 | return false; |
1555 | } |
1556 | |
1557 | return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace, |
1558 | Alignment, Flags, IsFast); |
1559 | } |
1560 | |
1561 | EVT SITargetLowering::getOptimalMemOpType( |
1562 | const MemOp &Op, const AttributeList &FuncAttributes) const { |
1563 | // FIXME: Should account for address space here. |
1564 | |
1565 | // The default fallback uses the private pointer size as a guess for a |
1566 | // type to use. Make sure we switch these to 64-bit accesses. |
1567 | |
1568 | if (Op.size() >= 16 && |
1569 | Op.isDstAligned(Align(4))) |
1570 | return MVT::v4i32; |
1571 | |
1572 | if (Op.size() >= 8 && Op.isDstAligned(Align(4))) |
1573 | return MVT::v2i32; |
1574 | |
1575 | // Use the default. |
1576 | return MVT::Other; |
1577 | } |
1578 | |
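| // A load is known not to be clobbered if earlier analysis attached the |
| // amdgpu.noclobber metadata to its pointer. |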
1579 | bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { |
1580 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
1581 | const Value *Ptr = MemNode->getMemOperand()->getValue(); |
1582 | const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); |
1583 | return I && I->getMetadata("amdgpu.noclobber"); |
1584 | } |
1585 | |
1586 | bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) { |
1587 | return AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS || |
1588 | AS == AMDGPUAS::PRIVATE_ADDRESS; |
1589 | } |
1590 | |
1591 | bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS, |
1592 | unsigned DestAS) const { |
1593 | // Flat -> private/local is a simple truncate. |
1594 | // Flat -> global is no-op. |
1595 | if (SrcAS == AMDGPUAS::FLAT_ADDRESS) |
1596 | return true; |
1597 | |
1598 | const GCNTargetMachine &TM = |
1599 | static_cast<const GCNTargetMachine &>(getTargetMachine()); |
1600 | return TM.isNoopAddrSpaceCast(SrcAS, DestAS); |
1601 | } |
1602 | |
1603 | bool SITargetLowering::isMemOpUniform(const SDNode *N) const { |
1604 | const MemSDNode *MemNode = cast<MemSDNode>(N); |
1605 | |
1606 | return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); |
1607 | } |
1608 | |
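| // For vectors with sub-dword (<= 16-bit) elements, split power-of-2 types |
| // and widen the rest instead of promoting the element type, so packed |
| // 16-bit operations remain usable. |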
1609 | TargetLoweringBase::LegalizeTypeAction |
1610 | SITargetLowering::getPreferredVectorAction(MVT VT) const { |
1611 | if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 && |
1612 | VT.getScalarType().bitsLE(MVT::i16)) |
1613 | return VT.isPow2VectorType() ? TypeSplitVector : TypeWidenVector; |
1614 | return TargetLoweringBase::getPreferredVectorAction(VT); |
1615 | } |
1616 | |
1617 | bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, |
1618 | Type *Ty) const { |
1619 | // FIXME: Could be smarter if called for vector constants. |
1620 | return true; |
1621 | } |
1622 | |
1623 | bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { |
1624 | if (Subtarget->has16BitInsts() && VT == MVT::i16) { |
1625 | switch (Op) { |
1626 | case ISD::LOAD: |
1627 | case ISD::STORE: |
1628 | |
1629 | // These operations are done with 32-bit instructions anyway. |
1630 | case ISD::AND: |
1631 | case ISD::OR: |
1632 | case ISD::XOR: |
1633 | case ISD::SELECT: |
1634 | // TODO: Extending? |
1635 | return true; |
1636 | default: |
1637 | return false; |
1638 | } |
1639 | } |
1640 | |
1641 | // SimplifySetCC uses this function to determine whether or not it should |
1642 | // create setcc with i1 operands. We don't have instructions for i1 setcc. |
1643 | if (VT == MVT::i1 && Op == ISD::SETCC) |
1644 | return false; |
1645 | |
1646 | return TargetLowering::isTypeDesirableForOp(Op, VT); |
1647 | } |
1648 | |
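| // Form a pointer into the kernarg segment by copying in the preloaded |
| // KERNARG_SEGMENT_PTR SGPR pair and offsetting it by the argument's byte |
| // offset. |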
1649 | SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, |
1650 | const SDLoc &SL, |
1651 | SDValue Chain, |
1652 | uint64_t Offset) const { |
1653 | const DataLayout &DL = DAG.getDataLayout(); |
1654 | MachineFunction &MF = DAG.getMachineFunction(); |
1655 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
1656 | |
1657 | const ArgDescriptor *InputPtrReg; |
1658 | const TargetRegisterClass *RC; |
1659 | LLT ArgTy; |
1660 | |
1661 | std::tie(InputPtrReg, RC, ArgTy) = |
1662 | Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
1663 | |
1664 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
1665 | MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS); |
1666 | SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, |
1667 | MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); |
1668 | |
1669 | return DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Offset)); |
1670 | } |
1671 | |
1672 | SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, |
1673 | const SDLoc &SL) const { |
1674 | uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), |
1675 | FIRST_IMPLICIT); |
1676 | return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); |
1677 | } |
1678 | |
1679 | SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, |
1680 | const SDLoc &SL, SDValue Val, |
1681 | bool Signed, |
1682 | const ISD::InputArg *Arg) const { |
1683 | // First, if it is a widened vector, narrow it. |
1684 | if (VT.isVector() && |
1685 | VT.getVectorNumElements() != MemVT.getVectorNumElements()) { |
1686 | EVT NarrowedVT = |
1687 | EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), |
1688 | VT.getVectorNumElements()); |
1689 | Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, NarrowedVT, Val, |
1690 | DAG.getConstant(0, SL, MVT::i32)); |
1691 | } |
1692 | |
1693 | // Then convert the vector elements or scalar value. |
1694 | if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && |
1695 | VT.bitsLT(MemVT)) { |
1696 | unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; |
1697 | Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); |
1698 | } |
1699 | |
1700 | if (MemVT.isFloatingPoint()) |
1701 | Val = getFPExtOrFPRound(DAG, Val, SL, VT); |
1702 | else if (Signed) |
1703 | Val = DAG.getSExtOrTrunc(Val, SL, VT); |
1704 | else |
1705 | Val = DAG.getZExtOrTrunc(Val, SL, VT); |
1706 | |
1707 | return Val; |
1708 | } |
1709 | |
1710 | SDValue SITargetLowering::lowerKernargMemParameter( |
1711 | SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain, |
1712 | uint64_t Offset, Align Alignment, bool Signed, |
1713 | const ISD::InputArg *Arg) const { |
1714 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
1715 | |
1716 | // Try to avoid using an extload by loading earlier than the argument |
1717 | // address, and extracting the relevant bits. The load should hopefully be |
1718 | // merged with the previous argument. |
1719 | if (MemVT.getStoreSize() < 4 && Alignment < 4) { |
1720 | // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). |
1721 | int64_t AlignDownOffset = alignDown(Offset, 4); |
1722 | int64_t OffsetDiff = Offset - AlignDownOffset; |
1723 | |
1724 | EVT IntVT = MemVT.changeTypeToInteger(); |
1725 | |
1726 | // TODO: If we passed in the base kernel offset we could have a better |
1727 | // alignment than 4, but we don't really need it. |
1728 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); |
1729 | SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, Align(4), |
1730 | MachineMemOperand::MODereferenceable | |
1731 | MachineMemOperand::MOInvariant); |
1732 | |
1733 | SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); |
1734 | SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); |
1735 | |
1736 | SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); |
1737 | ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); |
1738 | ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); |
1739 | |
1740 | |
1741 | return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); |
1742 | } |
1743 | |
1744 | SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); |
1745 | SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Alignment, |
1746 | MachineMemOperand::MODereferenceable | |
1747 | MachineMemOperand::MOInvariant); |
1748 | |
1749 | SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); |
1750 | return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); |
1751 | } |
1752 | |
1753 | SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, |
1754 | const SDLoc &SL, SDValue Chain, |
1755 | const ISD::InputArg &Arg) const { |
1756 | MachineFunction &MF = DAG.getMachineFunction(); |
1757 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
1758 | |
1759 | if (Arg.Flags.isByVal()) { |
1760 | unsigned Size = Arg.Flags.getByValSize(); |
1761 | int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); |
1762 | return DAG.getFrameIndex(FrameIdx, MVT::i32); |
1763 | } |
1764 | |
1765 | unsigned ArgOffset = VA.getLocMemOffset(); |
1766 | unsigned ArgSize = VA.getValVT().getStoreSize(); |
1767 | |
1768 | int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); |
1769 | |
1770 | // Create load nodes to retrieve arguments from the stack. |
1771 | SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); |
1772 | SDValue ArgValue; |
1773 | |
1774 | // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT. |
1775 | ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; |
1776 | MVT MemVT = VA.getValVT(); |
1777 | |
1778 | switch (VA.getLocInfo()) { |
1779 | default: |
1780 | break; |
1781 | case CCValAssign::BCvt: |
1782 | MemVT = VA.getLocVT(); |
1783 | break; |
1784 | case CCValAssign::SExt: |
1785 | ExtType = ISD::SEXTLOAD; |
1786 | break; |
1787 | case CCValAssign::ZExt: |
1788 | ExtType = ISD::ZEXTLOAD; |
1789 | break; |
1790 | case CCValAssign::AExt: |
1791 | ExtType = ISD::EXTLOAD; |
1792 | break; |
1793 | } |
1794 | |
1795 | ArgValue = DAG.getExtLoad( |
1796 | ExtType, SL, VA.getLocVT(), Chain, FIN, |
1797 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), |
1798 | MemVT); |
1799 | return ArgValue; |
1800 | } |
1801 | |
1802 | SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, |
1803 | const SIMachineFunctionInfo &MFI, |
1804 | EVT VT, |
1805 | AMDGPUFunctionArgInfo::PreloadedValue PVID) const { |
1806 | const ArgDescriptor *Reg; |
1807 | const TargetRegisterClass *RC; |
1808 | LLT Ty; |
1809 | |
1810 | std::tie(Reg, RC, Ty) = MFI.getPreloadedValue(PVID); |
1811 | return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); |
1812 | } |
1813 | |
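| // For pixel shaders, walk the formal arguments to decide which PS inputs |
| // are really used, so unused interpolants can be skipped and the input |
| // enable bits set correctly. |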
1814 | static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, |
1815 | CallingConv::ID CallConv, |
1816 | ArrayRef<ISD::InputArg> Ins, BitVector &Skipped, |
1817 | FunctionType *FType, |
1818 | SIMachineFunctionInfo *Info) { |
1819 | for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { |
1820 | const ISD::InputArg *Arg = &Ins[I]; |
1821 | |
1822 | assert((!Arg->VT.isVector() || Arg->VT.getScalarSizeInBits() == 16) && |
1823 | "vector type argument should have been split"); |
1824 | |
1825 | // First check if it's a PS input addr. |
1826 | if (CallConv == CallingConv::AMDGPU_PS && |
1827 | !Arg->Flags.isInReg() && PSInputNum <= 15) { |
1828 | bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); |
1829 | |
1830 | // Inconveniently only the first part of the split is marked as split, |
1831 | // so skip to the end. We only want to increment PSInputNum once for the |
1832 | // entire split argument. |
1833 | if (Arg->Flags.isSplit()) { |
1834 | while (!Arg->Flags.isSplitEnd()) { |
1835 | assert((!Arg->VT.isVector() || |
1836 | Arg->VT.getScalarSizeInBits() == 16) && |
1837 | "unexpected vector split in ps argument type"); |
1838 | if (!SkipArg) |
1839 | Splits.push_back(*Arg); |
1840 | Arg = &Ins[++I]; |
1841 | } |
1842 | } |
1843 | |
1844 | if (SkipArg) { |
1845 | // We can safely skip PS inputs. |
1846 | Skipped.set(Arg->getOrigArgIndex()); |
1847 | ++PSInputNum; |
1848 | continue; |
1849 | } |
1850 | |
1851 | Info->markPSInputAllocated(PSInputNum); |
1852 | if (Arg->Used) |
1853 | Info->markPSInputEnabled(PSInputNum); |
1854 | |
1855 | ++PSInputNum; |
1856 | } |
1857 | |
1858 | Splits.push_back(*Arg); |
1859 | } |
1860 | } |
1861 | |
1862 | // Allocate special entry-function inputs passed in VGPRs. |
1863 | void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo, |
1864 | MachineFunction &MF, |
1865 | const SIRegisterInfo &TRI, |
1866 | SIMachineFunctionInfo &Info) const { |
1867 | const LLT S32 = LLT::scalar(32); |
1868 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
1869 | |
1870 | if (Info.hasWorkItemIDX()) { |
1871 | Register Reg = AMDGPU::VGPR0; |
1872 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1873 | |
1874 | CCInfo.AllocateReg(Reg); |
1875 | unsigned Mask = (Subtarget->hasPackedTID() && |
1876 | Info.hasWorkItemIDY()) ? 0x3ff : ~0u; |
1877 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); |
1878 | } |
1879 | |
1880 | if (Info.hasWorkItemIDY()) { |
1881 | assert(Info.hasWorkItemIDX()); |
1882 | if (Subtarget->hasPackedTID()) { |
1883 | Info.setWorkItemIDY(ArgDescriptor::createRegister(AMDGPU::VGPR0, |
1884 | 0x3ff << 10)); |
1885 | } else { |
1886 | unsigned Reg = AMDGPU::VGPR1; |
1887 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1888 | |
1889 | CCInfo.AllocateReg(Reg); |
1890 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); |
1891 | } |
1892 | } |
1893 | |
1894 | if (Info.hasWorkItemIDZ()) { |
1895 | assert(Info.hasWorkItemIDX() && Info.hasWorkItemIDY()); |
1896 | if (Subtarget->hasPackedTID()) { |
1897 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(AMDGPU::VGPR0, |
1898 | 0x3ff << 20)); |
1899 | } else { |
1900 | unsigned Reg = AMDGPU::VGPR2; |
1901 | MRI.setType(MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass), S32); |
1902 | |
1903 | CCInfo.AllocateReg(Reg); |
1904 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); |
1905 | } |
1906 | } |
1907 | } |
1908 | |
1909 | // Try to allocate a VGPR at the end of the argument list, or if no |
1910 | // argument VGPRs are left, allocate a stack slot instead. |
1911 | // If \p Mask is given, it indicates the bitfield position in the register. |
1912 | // If \p Arg is given, use it with the new \p Mask instead of allocating new. |
1913 | static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u, |
1914 | ArgDescriptor Arg = ArgDescriptor()) { |
1915 | if (Arg.isSet()) |
1916 | return ArgDescriptor::createArg(Arg, Mask); |
1917 | |
1918 | ArrayRef<MCPhysReg> ArgVGPRs |
1919 | = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); |
1920 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); |
1921 | if (RegIdx == ArgVGPRs.size()) { |
1922 | // Spill to stack required. |
1923 | int64_t Offset = CCInfo.AllocateStack(4, Align(4)); |
1924 | |
1925 | return ArgDescriptor::createStack(Offset, Mask); |
1926 | } |
1927 | |
1928 | unsigned Reg = ArgVGPRs[RegIdx]; |
1929 | Reg = CCInfo.AllocateReg(Reg); |
1930 | assert(Reg != AMDGPU::NoRegister); |
1931 | |
1932 | MachineFunction &MF = CCInfo.getMachineFunction(); |
1933 | Register LiveInVReg = MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); |
1934 | MF.getRegInfo().setType(LiveInVReg, LLT::scalar(32)); |
1935 | return ArgDescriptor::createRegister(Reg, Mask); |
1936 | } |
1937 | |
1938 | static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, |
1939 | const TargetRegisterClass *RC, |
1940 | unsigned NumArgRegs) { |
1941 | ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); |
1942 | unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); |
1943 | if (RegIdx == ArgSGPRs.size()) |
1944 | report_fatal_error("ran out of SGPRs for arguments"); |
1945 | |
1946 | unsigned Reg = ArgSGPRs[RegIdx]; |
1947 | Reg = CCInfo.AllocateReg(Reg); |
1948 | assert(Reg != AMDGPU::NoRegister); |
1949 | |
1950 | MachineFunction &MF = CCInfo.getMachineFunction(); |
1951 | MF.addLiveIn(Reg, RC); |
1952 | return ArgDescriptor::createRegister(Reg); |
1953 | } |
1954 | |
1955 | // Allocate the specific, fixed SGPR requested for this input and mark it |
1956 | // as a live-in of the function so the register allocator preserves it. |
1957 | |
1958 | static void allocateFixedSGPRInputImpl(CCState &CCInfo, |
1959 | const TargetRegisterClass *RC, |
1960 | MCRegister Reg) { |
1961 | Reg = CCInfo.AllocateReg(Reg); |
1962 | assert(Reg != AMDGPU::NoRegister); |
1963 | MachineFunction &MF = CCInfo.getMachineFunction(); |
1964 | MF.addLiveIn(Reg, RC); |
1965 | } |
1966 | |
1967 | static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) { |
1968 | if (Arg) { |
1969 | allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, |
1970 | Arg.getRegister()); |
1971 | } else |
1972 | Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); |
1973 | } |
1974 | |
1975 | static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) { |
1976 | if (Arg) { |
1977 | allocateFixedSGPRInputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, |
1978 | Arg.getRegister()); |
1979 | } else |
1980 | Arg = allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); |
1981 | } |
1982 | |
1983 | // Allocate implicit function VGPR arguments at the end of the allocated |
1984 | // user arguments. |
1985 | void SITargetLowering::allocateSpecialInputVGPRs( |
1986 | CCState &CCInfo, MachineFunction &MF, |
1987 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { |
1988 | const unsigned Mask = 0x3ff; |
1989 | ArgDescriptor Arg; |
1990 | |
1991 | if (Info.hasWorkItemIDX()) { |
1992 | Arg = allocateVGPR32Input(CCInfo, Mask); |
1993 | Info.setWorkItemIDX(Arg); |
1994 | } |
1995 | |
1996 | if (Info.hasWorkItemIDY()) { |
1997 | Arg = allocateVGPR32Input(CCInfo, Mask << 10, Arg); |
1998 | Info.setWorkItemIDY(Arg); |
1999 | } |
2000 | |
2001 | if (Info.hasWorkItemIDZ()) |
2002 | Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo, Mask << 20, Arg)); |
2003 | } |
2004 | |
2005 | // Allocate implicit function VGPR arguments in fixed registers. |
2006 | void SITargetLowering::allocateSpecialInputVGPRsFixed( |
2007 | CCState &CCInfo, MachineFunction &MF, |
2008 | const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { |
2009 | Register Reg = CCInfo.AllocateReg(AMDGPU::VGPR31); |
2010 | if (!Reg) |
2011 | report_fatal_error("failed to allocate VGPR for implicit arguments"); |
2012 | |
2013 | const unsigned Mask = 0x3ff; |
2014 | Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg, Mask)); |
2015 | Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg, Mask << 10)); |
2016 | Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg, Mask << 20)); |
2017 | } |
2018 | |
2019 | void SITargetLowering::allocateSpecialInputSGPRs( |
2020 | CCState &CCInfo, |
2021 | MachineFunction &MF, |
2022 | const SIRegisterInfo &TRI, |
2023 | SIMachineFunctionInfo &Info) const { |
2024 | auto &ArgInfo = Info.getArgInfo(); |
2025 | |
2026 | |
2027 | // TODO: Unify handling with private memory pointers. |
2028 | if (Info.hasDispatchPtr()) |
2029 | allocateSGPR64Input(CCInfo, ArgInfo.DispatchPtr); |
2030 | |
2031 | if (Info.hasQueuePtr()) |
2032 | allocateSGPR64Input(CCInfo, ArgInfo.QueuePtr); |
2033 | |
2034 | // Implicit arg ptr takes the place of the kernarg segment pointer. This |
2035 | // is a constant offset from the kernarg segment. |
2036 | if (Info.hasImplicitArgPtr()) |
2037 | allocateSGPR64Input(CCInfo, ArgInfo.ImplicitArgPtr); |
2038 | |
2039 | if (Info.hasDispatchID()) |
2040 | allocateSGPR64Input(CCInfo, ArgInfo.DispatchID); |
2041 | |
2042 | // flat_scratch_init is not applicable for non-kernel functions. |
2043 | |
2044 | if (Info.hasWorkGroupIDX()) |
2045 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDX); |
2046 | |
2047 | if (Info.hasWorkGroupIDY()) |
2048 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDY); |
2049 | |
2050 | if (Info.hasWorkGroupIDZ()) |
2051 | allocateSGPR32Input(CCInfo, ArgInfo.WorkGroupIDZ); |
2052 | } |
2053 | |
2054 | // Allocate special inputs passed in user SGPRs. |
2055 | void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo, |
2056 | MachineFunction &MF, |
2057 | const SIRegisterInfo &TRI, |
2058 | SIMachineFunctionInfo &Info) const { |
2059 | if (Info.hasImplicitBufferPtr()) { |
2060 | Register ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); |
2061 | MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); |
2062 | CCInfo.AllocateReg(ImplicitBufferPtrReg); |
2063 | } |
2064 | |
2065 | // FIXME: How should these inputs interact with inreg / SGPR arguments? |
2066 | if (Info.hasPrivateSegmentBuffer()) { |
2067 | Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); |
2068 | MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); |
2069 | CCInfo.AllocateReg(PrivateSegmentBufferReg); |
2070 | } |
2071 | |
2072 | if (Info.hasDispatchPtr()) { |
2073 | Register DispatchPtrReg = Info.addDispatchPtr(TRI); |
2074 | MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); |
2075 | CCInfo.AllocateReg(DispatchPtrReg); |
2076 | } |
2077 | |
2078 | if (Info.hasQueuePtr()) { |
2079 | Register QueuePtrReg = Info.addQueuePtr(TRI); |
2080 | MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); |
2081 | CCInfo.AllocateReg(QueuePtrReg); |
2082 | } |
2083 | |
2084 | if (Info.hasKernargSegmentPtr()) { |
2085 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
2086 | Register InputPtrReg = Info.addKernargSegmentPtr(TRI); |
2087 | CCInfo.AllocateReg(InputPtrReg); |
2088 | |
2089 | Register VReg = MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); |
2090 | MRI.setType(VReg, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64)); |
2091 | } |
2092 | |
2093 | if (Info.hasDispatchID()) { |
2094 | Register DispatchIDReg = Info.addDispatchID(TRI); |
2095 | MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); |
2096 | CCInfo.AllocateReg(DispatchIDReg); |
2097 | } |
2098 | |
2099 | if (Info.hasFlatScratchInit() && !getSubtarget()->isAmdPalOS()) { |
2100 | Register FlatScratchInitReg = Info.addFlatScratchInit(TRI); |
2101 | MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); |
2102 | CCInfo.AllocateReg(FlatScratchInitReg); |
2103 | } |
2104 | |
2105 | // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we |
2106 | // read these from the dispatch pointer. |
2107 | } |
2108 | |
2109 | // Allocate special input registers that are initialized per-wave. |
2110 | void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo, |
2111 | MachineFunction &MF, |
2112 | SIMachineFunctionInfo &Info, |
2113 | CallingConv::ID CallConv, |
2114 | bool IsShader) const { |
2115 | if (Info.hasWorkGroupIDX()) { |
2116 | Register Reg = Info.addWorkGroupIDX(); |
2117 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
2118 | CCInfo.AllocateReg(Reg); |
2119 | } |
2120 | |
2121 | if (Info.hasWorkGroupIDY()) { |
2122 | Register Reg = Info.addWorkGroupIDY(); |
2123 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
2124 | CCInfo.AllocateReg(Reg); |
2125 | } |
2126 | |
2127 | if (Info.hasWorkGroupIDZ()) { |
2128 | Register Reg = Info.addWorkGroupIDZ(); |
2129 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
2130 | CCInfo.AllocateReg(Reg); |
2131 | } |
2132 | |
2133 | if (Info.hasWorkGroupInfo()) { |
2134 | Register Reg = Info.addWorkGroupInfo(); |
2135 | MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass); |
2136 | CCInfo.AllocateReg(Reg); |
2137 | } |
2138 | |
2139 | if (Info.hasPrivateSegmentWaveByteOffset()) { |
2140 | // Scratch wave offset passed in system SGPR. |
2141 | unsigned PrivateSegmentWaveByteOffsetReg; |
2142 | |
2143 | if (IsShader) { |
2144 | PrivateSegmentWaveByteOffsetReg = |
2145 | Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); |
2146 | |
2147 | // This is true if the scratch wave byte offset doesn't have a fixed |
2148 | // location. |
2149 | if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { |
2150 | PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); |
2151 | Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); |
2152 | } |
2153 | } else |
2154 | PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); |
2155 | |
2156 | MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); |
2157 | CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); |
2158 | } |
2159 | } |
2160 | |
2161 | static void reservePrivateMemoryRegs(const TargetMachine &TM, |
2162 | MachineFunction &MF, |
2163 | const SIRegisterInfo &TRI, |
2164 | SIMachineFunctionInfo &Info) { |
2165 | // Now that we've figured out where the scratch register inputs are, see |
2166 | // if we should reserve the arguments and use them directly. |
2167 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
2168 | bool HasStackObjects = MFI.hasStackObjects(); |
2169 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
2170 | |
2171 | // Record that we know we have non-spill stack objects so we don't need |
2172 | // to check all stack objects later. |
2173 | if (HasStackObjects) |
2174 | Info.setHasNonSpillStackObjects(true); |
2175 | |
2176 | // Everything live out of a block is spilled with fast regalloc, so it's |
2177 | // almost certain that spilling will be required. |
2178 | if (TM.getOptLevel() == CodeGenOpt::None) |
2179 | HasStackObjects = true; |
2180 | |
2181 | // For now assume stack access is needed in any callee functions, so we |
2182 | // need the scratch registers to pass in. |
2183 | bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); |
2184 | |
2185 | if (!ST.enableFlatScratch()) { |
2186 | if (RequiresStackAccess && ST.isAmdHsaOrMesa(MF.getFunction())) { |
2187 | // If we have stack objects, we unquestionably need the private buffer |
2188 | // resource. For the Code Object V2 ABI, this will be the first 4 user |
2189 | // SGPR inputs. We can reserve those and use them directly. |
2190 | |
2191 | Register PrivateSegmentBufferReg = |
2192 | Info.getPreloadedReg(AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); |
2193 | Info.setScratchRSrcReg(PrivateSegmentBufferReg); |
2194 | } else { |
2195 | unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); |
2196 | |
2197 | // We tentatively reserve the last registers (skipping the last registers |
2198 | // which may contain VCC, FLAT_SCR, and XNACK). After register allocation, |
2199 | // we'll replace these with the ones immediately after those which were |
2200 | // really allocated. In the prologue, copies will be inserted from the |
2201 | // argument to these reserved registers. |
2202 | |
2203 | // Without HSA, relocations are used for the scratch pointer and the |
2204 | // buffer resource setup is always inserted in the prologue. |
2205 | Info.setScratchRSrcReg(ReservedBufferReg); |
2206 | } |
2207 | } |
2208 | |
2209 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
2210 | |
2211 | // For entry functions we have to set up the stack pointer if we use it, |
2212 | // whereas non-entry functions get this "for free". This means there is |
2213 | // no intrinsic advantage to using s32 over s34 in cases where we do not |
2214 | // have calls but do need a frame pointer (i.e. if we are requested to |
2215 | // have one because frame pointer elimination is disabled). To keep things |
2216 | // simple we only ever use s32 as the call ABI stack pointer, and so using |
2217 | // it does not imply we need a separate frame pointer. |
2218 | // |
2219 | // Try to use s32 as the SP, but move it if it would interfere with input |
2220 | // arguments. This won't work with calls though. |
2221 | // |
2222 | // FIXME: Move SP to avoid any possible inputs, or find a way to spill |
2223 | // input registers. |
2224 | if (!MRI.isLiveIn(AMDGPU::SGPR32)) { |
2225 | Info.setStackPtrOffsetReg(AMDGPU::SGPR32); |
2226 | } else { |
2227 | assert(AMDGPU::isShader(MF.getFunction().getCallingConv())); |
2228 | |
2229 | if (MFI.hasCalls()) |
2230 | report_fatal_error("call in graphics shader with too many input SGPRs"); |
2231 | |
2232 | for (unsigned Reg : AMDGPU::SGPR_32RegClass) { |
2233 | if (!MRI.isLiveIn(Reg)) { |
2234 | Info.setStackPtrOffsetReg(Reg); |
2235 | break; |
2236 | } |
2237 | } |
2238 | |
2239 | if (Info.getStackPtrOffsetReg() == AMDGPU::SP_REG) |
2240 | report_fatal_error("failed to find register for SP"); |
2241 | } |
2242 | |
2243 | // hasFP should be accurate for entry functions even before the frame is |
2244 | // finalized, because it does not rely on the known stack size, only |
2245 | // properties like whether variable sized objects are present. |
2246 | if (ST.getFrameLowering()->hasFP(MF)) { |
2247 | Info.setFrameOffsetReg(AMDGPU::SGPR33); |
2248 | } |
2249 | } |
2250 | |
2251 | bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { |
2252 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
2253 | return !Info->isEntryFunction(); |
2254 | } |
2255 | |
2256 | void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { |
2257 | // Nothing to do here; the CSR copies are created in insertCopiesSplitCSR. |
2258 | } |
2259 | |
2260 | void SITargetLowering::insertCopiesSplitCSR( |
2261 | MachineBasicBlock *Entry, |
2262 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { |
2263 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2264 | |
2265 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); |
2266 | if (!IStart) |
2267 | return; |
2268 | |
2269 | const TargetInstrInfo *TII = Subtarget->getInstrInfo(); |
2270 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); |
2271 | MachineBasicBlock::iterator MBBI = Entry->begin(); |
2272 | for (const MCPhysReg *I = IStart; *I; ++I) { |
2273 | const TargetRegisterClass *RC = nullptr; |
2274 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
2275 | RC = &AMDGPU::SGPR_64RegClass; |
2276 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
2277 | RC = &AMDGPU::SGPR_32RegClass; |
2278 | else |
2279 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); |
2280 | |
2281 | Register NewVR = MRI->createVirtualRegister(RC); |
2282 | // Create a copy from the CSR to a virtual register. |
2283 | Entry->addLiveIn(*I); |
2284 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) |
2285 | .addReg(*I); |
2286 | |
2287 | // Insert the copy-back instructions right before the terminator. |
2288 | for (auto *Exit : Exits) |
2289 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), |
2290 | TII->get(TargetOpcode::COPY), *I) |
2291 | .addReg(NewVR); |
2292 | } |
2293 | } |
2294 | |
2295 | SDValue SITargetLowering::LowerFormalArguments( |
2296 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, |
2297 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
2298 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { |
2299 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2300 | |
2301 | MachineFunction &MF = DAG.getMachineFunction(); |
2302 | const Function &Fn = MF.getFunction(); |
2303 | FunctionType *FType = MF.getFunction().getFunctionType(); |
2304 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2305 | |
2306 | if (Subtarget->isAmdHsaOS() && AMDGPU::isGraphics(CallConv)) { |
2307 | DiagnosticInfoUnsupported NoGraphicsHSA( |
2308 | Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); |
2309 | DAG.getContext()->diagnose(NoGraphicsHSA); |
2310 | return DAG.getEntryNode(); |
2311 | } |
2312 | |
2313 | Info->allocateModuleLDSGlobal(Fn.getParent()); |
2314 | |
2315 | SmallVector<ISD::InputArg, 16> Splits; |
2316 | SmallVector<CCValAssign, 16> ArgLocs; |
2317 | BitVector Skipped(Ins.size()); |
2318 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, |
2319 | *DAG.getContext()); |
2320 | |
2321 | bool IsGraphics = AMDGPU::isGraphics(CallConv); |
2322 | bool IsKernel = AMDGPU::isKernel(CallConv); |
2323 | bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv); |
2324 | |
2325 | if (IsGraphics) { |
2326 | assert(!Info->hasDispatchPtr() && !Info->hasKernargSegmentPtr() && |
2327 | (!Info->hasFlatScratchInit() || Subtarget->enableFlatScratch()) && |
2328 | !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && |
2329 | !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && |
2330 | !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && |
2331 | !Info->hasWorkItemIDZ()); |
2332 | } |
2333 | |
2334 | if (CallConv == CallingConv::AMDGPU_PS) { |
2335 | processPSInputArgs(Splits, CallConv, Ins, Skipped, FType, Info); |
2336 | |
2337 | // At least one interpolation mode must be enabled or else the GPU will |
2338 | // hang. |
2339 | // |
2340 | // Check PSInputAddr instead of PSInputEnable. The idea is that if the |
2341 | // user set PSInputAddr, the user wants to enable some bits after the |
2342 | // compilation based on run-time states. Since we can't know what the |
2343 | // final PSInputEna will look like, we shouldn't do anything here and the |
2344 | // user should take responsibility for the correct programming. |
2345 | // |
2346 | // Otherwise, the following restrictions apply: |
2347 | // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled. |
2348 | // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be |
2349 | //   enabled too. |
2350 | if ((Info->getPSInputAddr() & 0x7F) == 0 || |
2351 | ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11))) { |
2352 | CCInfo.AllocateReg(AMDGPU::VGPR0); |
2353 | CCInfo.AllocateReg(AMDGPU::VGPR1); |
2354 | Info->markPSInputAllocated(0); |
2355 | Info->markPSInputEnabled(0); |
2356 | } |
2357 | if (Subtarget->isAmdPalOS()) { |
2358 | // For isAmdPalOS, the user does not enable some bits after compilation |
2359 | // based on run-time states; the register values being generated here are |
2360 | // the final ones set in hardware. Therefore we need to apply the |
2361 | // workaround to PSInputAddr and PSInputEnable together. (The case where |
2362 | // a bit is set in PSInputAddr but not PSInputEnable is where the frontend |
2363 | // set up an input arg for a particular interpolation mode, but nothing |
2364 | // uses that input arg. Really we should have an earlier pass that |
2365 | // removes such an arg.) |
2366 | unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); |
2367 | if ((PsInputBits & 0x7F) == 0 || |
2368 | ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1))) |
2369 | Info->markPSInputEnabled( |
2370 | countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); |
2371 | } |
2372 | } else if (IsKernel) { |
2373 | assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); |
2374 | } else { |
2375 | Splits.append(Ins.begin(), Ins.end()); |
2376 | } |
2377 | |
2378 | if (IsEntryFunc) { |
2379 | allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); |
2380 | allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); |
2381 | } else { |
2382 | // For the fixed ABI, pass workitem IDs in the last argument register. |
2383 | if (AMDGPUTargetMachine::EnableFixedFunctionABI) |
2384 | allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info); |
2385 | } |
2386 | |
2387 | if (IsKernel) { |
2388 | analyzeFormalArgumentsCompute(CCInfo, Ins); |
2389 | } else { |
2390 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); |
2391 | CCInfo.AnalyzeFormalArguments(Splits, AssignFn); |
2392 | } |
2393 | |
2394 | SmallVector<SDValue, 16> Chains; |
2395 | |
2396 | // FIXME: This is the minimum kernel argument alignment. We should |
2397 | // improve this to the maximum alignment of the arguments. |
2398 | // |
2399 | // FIXME: Alignment of explicit arguments totally broken with non-0 |
2400 | // explicit kern arg offset. |
2401 | const Align KernelArgBaseAlign = Align(16); |
2402 | |
2403 | for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { |
2404 | const ISD::InputArg &Arg = Ins[i]; |
2405 | if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { |
2406 | InVals.push_back(DAG.getUNDEF(Arg.VT)); |
2407 | continue; |
2408 | } |
2409 | |
2410 | CCValAssign &VA = ArgLocs[ArgIdx++]; |
2411 | MVT VT = VA.getLocVT(); |
2412 | |
2413 | if (IsEntryFunc && VA.isMemLoc()) { |
2414 | VT = Ins[i].VT; |
2415 | EVT MemVT = VA.getLocVT(); |
2416 | |
2417 | const uint64_t Offset = VA.getLocMemOffset(); |
2418 | Align Alignment = commonAlignment(KernelArgBaseAlign, Offset); |
2419 | |
2420 | if (Arg.Flags.isByRef()) { |
2421 | SDValue Ptr = lowerKernArgParameterPtr(DAG, DL, Chain, Offset); |
2422 | |
2423 | const GCNTargetMachine &TM = |
2424 | static_cast<const GCNTargetMachine &>(getTargetMachine()); |
2425 | if (!TM.isNoopAddrSpaceCast(AMDGPUAS::CONSTANT_ADDRESS, |
2426 | Arg.Flags.getPointerAddrSpace())) { |
2427 | Ptr = DAG.getAddrSpaceCast(DL, VT, Ptr, AMDGPUAS::CONSTANT_ADDRESS, |
2428 | Arg.Flags.getPointerAddrSpace()); |
2429 | } |
2430 | |
2431 | InVals.push_back(Ptr); |
2432 | continue; |
2433 | } |
2434 | |
2435 | SDValue Arg = lowerKernargMemParameter( |
2436 | DAG, VT, MemVT, DL, Chain, Offset, Alignment, Ins[i].Flags.isSExt(), &Ins[i]); |
2437 | Chains.push_back(Arg.getValue(1)); |
2438 | |
2439 | auto *ParamTy = |
2440 | dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); |
2441 | if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && |
2442 | ParamTy && (ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || |
2443 | ParamTy->getAddressSpace() == AMDGPUAS::REGION_ADDRESS)) { |
2444 | // On SI local pointers are just offsets into LDS, so they are always |
2445 | // less than 16-bits. On CI and newer they could potentially be real |
2446 | // pointers, so we can't guarantee their size. |
2447 | Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, |
2448 | DAG.getValueType(MVT::i16)); |
2449 | } |
2450 | |
2451 | InVals.push_back(Arg); |
2452 | continue; |
2453 | } else if (!IsEntryFunc && VA.isMemLoc()) { |
2454 | SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); |
2455 | InVals.push_back(Val); |
2456 | if (!Arg.Flags.isByVal()) |
2457 | Chains.push_back(Val.getValue(1)); |
2458 | continue; |
2459 | } |
2460 | |
2461 | assert(VA.isRegLoc() && "Parameter must be in a register!"); |
2462 | |
2463 | Register Reg = VA.getLocReg(); |
2464 | const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); |
2465 | EVT ValVT = VA.getValVT(); |
2466 | |
2467 | Reg = MF.addLiveIn(Reg, RC); |
2468 | SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); |
2469 | |
2470 | if (Arg.Flags.isSRet()) { |
2471 | // The return object should be reasonably addressable. |
2472 | |
2473 | // FIXME: This helps when the return is a real sret. If it is an |
2474 | // automatically inserted sret (i.e. CanLowerReturn returns false), an |
2475 | // extra copy is inserted in SelectionDAGBuilder which obscures this. |
2476 | unsigned NumBits |
2477 | = 32 - getSubtarget()->getKnownHighZeroBitsForFrameIndex(); |
2478 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
2479 | DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); |
2480 | } |
2481 | |
2482 | // If this is an 8 or 16-bit value, it is really passed promoted |
2483 | // to 32 bits. Insert an assert[sz]ext to capture this, then |
2484 | // truncate to the right size. |
2485 | switch (VA.getLocInfo()) { |
2486 | case CCValAssign::Full: |
2487 | break; |
2488 | case CCValAssign::BCvt: |
2489 | Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); |
2490 | break; |
2491 | case CCValAssign::SExt: |
2492 | Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, |
2493 | DAG.getValueType(ValVT)); |
2494 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2495 | break; |
2496 | case CCValAssign::ZExt: |
2497 | Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, |
2498 | DAG.getValueType(ValVT)); |
2499 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2500 | break; |
2501 | case CCValAssign::AExt: |
2502 | Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); |
2503 | break; |
2504 | default: |
2505 | llvm_unreachable("Unknown loc info!"); |
2506 | } |
2507 | |
2508 | InVals.push_back(Val); |
2509 | } |
2510 | |
2511 | if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) { |
2512 | // Special inputs come after user arguments. |
2513 | allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); |
2514 | } |
2515 | |
2516 | // Start adding system SGPRs. |
2517 | if (IsEntryFunc) { |
2518 | allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsGraphics); |
2519 | } else { |
2520 | CCInfo.AllocateReg(Info->getScratchRSrcReg()); |
2521 | allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); |
2522 | } |
2523 | |
2524 | auto &ArgUsageInfo = |
2525 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
2526 | ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); |
2527 | |
2528 | unsigned StackArgSize = CCInfo.getNextStackOffset(); |
2529 | Info->setBytesInStackArgArea(StackArgSize); |
2530 | |
2531 | return Chains.empty() ? Chain : |
2532 | DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); |
2533 | } |
2534 | |
2535 | // TODO: If return values can't fit in registers, we should return as many |
2536 | // as possible in registers before passing on stack. |
2537 | bool SITargetLowering::CanLowerReturn( |
2538 | CallingConv::ID CallConv, |
2539 | MachineFunction &MF, bool IsVarArg, |
2540 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2541 | LLVMContext &Context) const { |
2542 | // Replacing returns with sret/stack usage doesn't make sense for shaders. |
2543 | // FIXME: Also sort of a workaround for custom vector splitting in |
2544 | // LowerReturn for shaders. Vector types should be explicitly handled by CC. |
2545 | if (AMDGPU::isEntryFunctionCC(CallConv)) |
2546 | return true; |
2547 | |
2548 | SmallVector<CCValAssign, 16> RVLocs; |
2549 | CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); |
2550 | return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); |
2551 | } |
2552 | |
2553 | SDValue |
2554 | SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, |
2555 | bool isVarArg, |
2556 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2557 | const SmallVectorImpl<SDValue> &OutVals, |
2558 | const SDLoc &DL, SelectionDAG &DAG) const { |
2559 | MachineFunction &MF = DAG.getMachineFunction(); |
2560 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
2561 | |
2562 | if (AMDGPU::isKernel(CallConv)) { |
2563 | return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, |
2564 | OutVals, DL, DAG); |
2565 | } |
2566 | |
2567 | bool IsShader = AMDGPU::isShader(CallConv); |
2568 | |
2569 | Info->setIfReturnsVoid(Outs.empty()); |
2570 | bool IsWaveEnd = Info->returnsVoid() && IsShader; |
2571 | |
2572 | // CCValAssign - represent the assignment of the return value to a location. |
2573 | SmallVector<CCValAssign, 48> RVLocs; |
2574 | SmallVector<ISD::OutputArg, 48> Splits; |
2575 | |
2576 | // CCState - Info about the registers and stack slots. |
2577 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, |
2578 | *DAG.getContext()); |
2579 | |
2580 | // Analyze outgoing return values. |
2581 | CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg)); |
2582 | |
2583 | SDValue Flag; |
2584 | SmallVector<SDValue, 48> RetOps; |
2585 | RetOps.push_back(Chain); |
2586 | |
2587 | // Add return address for callable functions. |
2588 | if (!Info->isEntryFunction()) { |
2589 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2590 | SDValue ReturnAddrReg = CreateLiveInRegister( |
2591 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
2592 | |
2593 | SDValue ReturnAddrVirtualReg = DAG.getRegister( |
2594 | MF.getRegInfo().createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass), |
2595 | MVT::i64); |
2596 | Chain = |
2597 | DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag); |
2598 | Flag = Chain.getValue(1); |
2599 | RetOps.push_back(ReturnAddrVirtualReg); |
2600 | } |
2601 | |
2602 | // Copy the result values into the output registers. |
2603 | for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E; |
2604 | ++I, ++RealRVLocIdx) { |
2605 | CCValAssign &VA = RVLocs[I]; |
2606 | assert(VA.isRegLoc() && "Can only return in registers!"); |
2607 | |
2608 | SDValue Arg = OutVals[RealRVLocIdx]; |
2609 | |
2610 | // Promote the value to the location's type, as other backends do. |
2611 | switch (VA.getLocInfo()) { |
2612 | case CCValAssign::Full: |
2613 | break; |
2614 | case CCValAssign::BCvt: |
2615 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
2616 | break; |
2617 | case CCValAssign::SExt: |
2618 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
2619 | break; |
2620 | case CCValAssign::ZExt: |
2621 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
2622 | break; |
2623 | case CCValAssign::AExt: |
2624 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
2625 | break; |
2626 | default: |
2627 | llvm_unreachable("Unknown loc info!"); |
2628 | } |
2629 | |
2630 | Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); |
2631 | Flag = Chain.getValue(1); |
2632 | RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); |
2633 | } |
2634 | |
2635 | // FIXME: Does sret work properly? |
2636 | if (!Info->isEntryFunction()) { |
2637 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2638 | const MCPhysReg *I = |
2639 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); |
2640 | if (I) { |
2641 | for (; *I; ++I) { |
2642 | if (AMDGPU::SReg_64RegClass.contains(*I)) |
2643 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); |
2644 | else if (AMDGPU::SReg_32RegClass.contains(*I)) |
2645 | RetOps.push_back(DAG.getRegister(*I, MVT::i32)); |
2646 | else |
2647 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); |
2648 | } |
2649 | } |
2650 | } |
2651 | |
2652 | // Update chain and glue. |
2653 | RetOps[0] = Chain; |
2654 | if (Flag.getNode()) |
2655 | RetOps.push_back(Flag); |
2656 | |
2657 | unsigned Opc = AMDGPUISD::ENDPGM; |
2658 | if (!IsWaveEnd) |
2659 | Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; |
2660 | return DAG.getNode(Opc, DL, MVT::Other, RetOps); |
2661 | } |
2662 | |
2663 | SDValue SITargetLowering::LowerCallResult( |
2664 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, |
2665 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, |
2666 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, |
2667 | SDValue ThisVal) const { |
2668 | CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); |
2669 | |
2670 | |
2671 | // Assign locations to each value returned by this call. |
2672 | CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, |
2673 | *DAG.getContext()); |
2674 | CCInfo.AnalyzeCallResult(Ins, RetCC); |
2675 | |
2676 | // Copy all of the result registers out of their specified physregs. |
2677 | for (unsigned i = 0; i != RVLocs.size(); ++i) { |
2678 | CCValAssign VA = RVLocs[i]; |
2679 | SDValue Val; |
2680 | |
2681 | if (VA.isRegLoc()) { |
2682 | Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); |
2683 | Chain = Val.getValue(1); |
2684 | InFlag = Val.getValue(2); |
2685 | } else if (VA.isMemLoc()) { |
2686 | report_fatal_error("TODO: return values in memory"); |
2687 | } else |
2688 | llvm_unreachable("unknown argument location type"); |
2689 | |
2690 | switch (VA.getLocInfo()) { |
2691 | case CCValAssign::Full: |
2692 | break; |
2693 | case CCValAssign::BCvt: |
2694 | Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); |
2695 | break; |
2696 | case CCValAssign::ZExt: |
2697 | Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, |
2698 | DAG.getValueType(VA.getValVT())); |
2699 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2700 | break; |
2701 | case CCValAssign::SExt: |
2702 | Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, |
2703 | DAG.getValueType(VA.getValVT())); |
2704 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2705 | break; |
2706 | case CCValAssign::AExt: |
2707 | Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); |
2708 | break; |
2709 | default: |
2710 | llvm_unreachable("Unknown loc info!"); |
2711 | } |
2712 | |
2713 | InVals.push_back(Val); |
2714 | } |
2715 | |
2716 | return Chain; |
2717 | } |
2718 | |
2719 | // Add code to pass special inputs required depending on used features |
2720 | // separate from the explicit user arguments present in the IR. |
2721 | void SITargetLowering::passSpecialInputs( |
2722 | CallLoweringInfo &CLI, |
2723 | CCState &CCInfo, |
2724 | const SIMachineFunctionInfo &Info, |
2725 | SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, |
2726 | SmallVectorImpl<SDValue> &MemOpChains, |
2727 | SDValue Chain) const { |
2728 | // If we don't have a call site, this was a call inserted by |
2729 | // legalization. These can never use special inputs. |
2730 | if (!CLI.CB) |
2731 | return; |
2732 | |
2733 | SelectionDAG &DAG = CLI.DAG; |
2734 | const SDLoc &DL = CLI.DL; |
2735 | |
2736 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
2737 | const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); |
2738 | |
2739 | const AMDGPUFunctionArgInfo *CalleeArgInfo |
2740 | = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo; |
2741 | if (const Function *CalleeFunc = CLI.CB->getCalledFunction()) { |
2742 | auto &ArgUsageInfo = |
2743 | DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); |
2744 | CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); |
2745 | } |
2746 | |
2747 | // TODO: Unify with private memory register handling. This is complicated |
2748 | // by the fact that at least in kernels, the input argument is not |
2749 | // necessarily in the same location as the input. |
2750 | AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { |
2751 | AMDGPUFunctionArgInfo::DISPATCH_PTR, |
2752 | AMDGPUFunctionArgInfo::QUEUE_PTR, |
2753 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR, |
2754 | AMDGPUFunctionArgInfo::DISPATCH_ID, |
2755 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X, |
2756 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, |
2757 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z |
2758 | }; |
2759 | |
2760 | for (auto InputID : InputRegs) { |
2761 | const ArgDescriptor *OutgoingArg; |
2762 | const TargetRegisterClass *ArgRC; |
2763 | LLT ArgTy; |
2764 | |
2765 | std::tie(OutgoingArg, ArgRC, ArgTy) = |
2766 | CalleeArgInfo->getPreloadedValue(InputID); |
2767 | if (!OutgoingArg) |
2768 | continue; |
2769 | |
2770 | const ArgDescriptor *IncomingArg; |
2771 | const TargetRegisterClass *IncomingArgRC; |
2772 | LLT Ty; |
2773 | std::tie(IncomingArg, IncomingArgRC, Ty) = |
2774 | CallerArgInfo.getPreloadedValue(InputID); |
2775 | assert(IncomingArgRC == ArgRC); |
2776 | |
2777 | // All special arguments are ints for now. |
2778 | EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; |
2779 | SDValue InputReg; |
2780 | |
2781 | if (IncomingArg) { |
2782 | InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); |
2783 | } else { |
2784 | // The implicit arg ptr is special because it doesn't have a corresponding |
2785 | // input for kernels, and is computed from the kernarg segment pointer. |
2786 | assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
2787 | InputReg = getImplicitArgPtr(DAG, DL); |
2788 | } |
2789 | |
2790 | if (OutgoingArg->isRegister()) { |
2791 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
2792 | if (!CCInfo.AllocateReg(OutgoingArg->getRegister())) |
2793 | report_fatal_error("failed to allocate implicit input argument"); |
2794 | } else { |
2795 | unsigned SpecialArgOffset = |
2796 | CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4)); |
2797 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
2798 | SpecialArgOffset); |
2799 | MemOpChains.push_back(ArgStore); |
2800 | } |
2801 | } |
2802 | |
2803 | // Pack workitem IDs into a single register, or pass them through as-is |
2804 | // if they are already packed. |
2805 | const ArgDescriptor *OutgoingArg; |
2806 | const TargetRegisterClass *ArgRC; |
2807 | LLT Ty; |
2808 | |
2809 | std::tie(OutgoingArg, ArgRC, Ty) = |
2810 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X); |
2811 | if (!OutgoingArg) |
2812 | std::tie(OutgoingArg, ArgRC, Ty) = |
2813 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y); |
2814 | if (!OutgoingArg) |
2815 | std::tie(OutgoingArg, ArgRC, Ty) = |
2816 | CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z); |
2817 | if (!OutgoingArg) |
2818 | return; |
2819 | |
2820 | const ArgDescriptor *IncomingArgX = std::get<0>( |
2821 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X)); |
2822 | const ArgDescriptor *IncomingArgY = std::get<0>( |
2823 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y)); |
2824 | const ArgDescriptor *IncomingArgZ = std::get<0>( |
2825 | CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z)); |
2826 | |
2827 | SDValue InputReg; |
2828 | SDLoc SL; |
2829 | |
2830 | // If incoming ids are not packed we need to pack them. |
2831 | if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX) |
2832 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX); |
2833 | |
2834 | if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) { |
2835 | SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY); |
2836 | Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y, |
2837 | DAG.getShiftAmountConstant(10, MVT::i32, SL)); |
2838 | InputReg = InputReg.getNode() ? |
2839 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y; |
2840 | } |
2841 | |
2842 | if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) { |
2843 | SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ); |
2844 | Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z, |
2845 | DAG.getShiftAmountConstant(20, MVT::i32, SL)); |
2846 | InputReg = InputReg.getNode() ? |
2847 | DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Z) : Z; |
2848 | } |
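| // At this point InputReg (if set) holds the packed workitem IDs: |
| // bits [9:0] = X, bits [19:10] = Y, bits [29:20] = Z. |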
2849 | |
2850 | if (!InputReg.getNode()) { |
2851 | // Workitem ids are already packed; any one of the present incoming |
2852 | // arguments will carry all required fields. |
2853 | ArgDescriptor IncomingArg = ArgDescriptor::createArg( |
2854 | IncomingArgX ? *IncomingArgX : |
2855 | IncomingArgY ? *IncomingArgY : |
2856 | *IncomingArgZ, ~0u); |
2857 | InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, IncomingArg); |
2858 | } |
2859 | |
2860 | if (OutgoingArg->isRegister()) { |
2861 | RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); |
2862 | CCInfo.AllocateReg(OutgoingArg->getRegister()); |
2863 | } else { |
2864 | unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4)); |
2865 | SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg, |
2866 | SpecialArgOffset); |
2867 | MemOpChains.push_back(ArgStore); |
2868 | } |
2869 | } |
2870 | |
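| // Guaranteed tail-call optimization is only ever possible for fastcc; |
| // other conventions may still be tail called opportunistically. |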
2871 | static bool canGuaranteeTCO(CallingConv::ID CC) { |
2872 | return CC == CallingConv::Fast; |
2873 | } |
2874 | |
2875 | // Return true if we might ever do TCO for calls with this calling convention. |
2876 | static bool mayTailCallThisCC(CallingConv::ID CC) { |
2877 | switch (CC) { |
2878 | case CallingConv::C: |
2879 | case CallingConv::AMDGPU_Gfx: |
2880 | return true; |
2881 | default: |
2882 | return canGuaranteeTCO(CC); |
2883 | } |
2884 | } |
2885 | |
2886 | bool SITargetLowering::isEligibleForTailCallOptimization( |
2887 | SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, |
2888 | const SmallVectorImpl<ISD::OutputArg> &Outs, |
2889 | const SmallVectorImpl<SDValue> &OutVals, |
2890 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { |
2891 | if (!mayTailCallThisCC(CalleeCC)) |
2892 | return false; |
2893 | |
2894 | // For a divergent call target, we need a waterfall loop over the possible |
2895 | // callees, which precludes us from using a simple jump. |
2896 | if (Callee->isDivergent()) |
2897 | return false; |
2898 | |
2899 | MachineFunction &MF = DAG.getMachineFunction(); |
2900 | const Function &CallerF = MF.getFunction(); |
2901 | CallingConv::ID CallerCC = CallerF.getCallingConv(); |
2902 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
2903 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); |
2904 | |
2905 | // Kernels aren't callable and don't have a live-in return address, so it |
2906 | // doesn't make sense to do a tail call with entry functions. |
2907 | if (!CallerPreserved) |
2908 | return false; |
2909 | |
2910 | bool CCMatch = CallerCC == CalleeCC; |
2911 | |
2912 | if (DAG.getTarget().Options.GuaranteedTailCallOpt) { |
2913 | if (canGuaranteeTCO(CalleeCC) && CCMatch) |
2914 | return true; |
2915 | return false; |
2916 | } |
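     | // Under -tailcallopt, TCO is only guaranteed for matching fastcc |
     | // caller/callee pairs; the checks below handle the opportunistic |
     | // sibling-call case. |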
2917 | |
2918 | // Variadic functions are not eligible for tail calls. |
2919 | if (IsVarArg) |
2920 | return false; |
2921 | |
2922 | for (const Argument &Arg : CallerF.args()) { |
2923 | if (Arg.hasByValAttr()) |
2924 | return false; |
2925 | } |
2926 | |
2927 | LLVMContext &Ctx = *DAG.getContext(); |
2928 | |
2929 | // Check that the call results are passed in the same way. |
2930 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, |
2931 | CCAssignFnForCall(CalleeCC, IsVarArg), |
2932 | CCAssignFnForCall(CallerCC, IsVarArg))) |
2933 | return false; |
2934 | |
2935 | // The callee has to preserve all registers the caller needs to preserve. |
2936 | if (!CCMatch) { |
2937 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); |
2938 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) |
2939 | return false; |
2940 | } |
2941 | |
2942 | // Nothing more to check if the callee is taking no arguments. |
2943 | if (Outs.empty()) |
2944 | return true; |
2945 | |
2946 | SmallVector<CCValAssign, 16> ArgLocs; |
2947 | CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); |
2948 | |
2949 | CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); |
2950 | |
2951 | const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); |
2952 | |
2953 | // If the stack arguments for this call do not fit into our own save area, |
2954 | // the call cannot be made tail. |
2955 | if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) |
2956 | return false; |
2957 | |
2958 | const MachineRegisterInfo &MRI = MF.getRegInfo(); |
2959 | return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); |
2960 | } |
2961 | |
2962 | bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { |
2963 | if (!CI->isTailCall()) |
2964 | return false; |
2965 | |
2966 | const Function *ParentFn = CI->getParent()->getParent(); |
2967 | if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) |
2968 | return false; |
2969 | return true; |
2970 | } |
2971 | |
2972 | // The wave scratch offset register is used as the global base pointer. |
2973 | SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, |
2974 | SmallVectorImpl<SDValue> &InVals) const { |
2975 | SelectionDAG &DAG = CLI.DAG; |
2976 | const SDLoc &DL = CLI.DL; |
2977 | SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; |
2978 | SmallVector<SDValue, 32> &OutVals = CLI.OutVals; |
2979 | SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; |
2980 | SDValue Chain = CLI.Chain; |
2981 | SDValue Callee = CLI.Callee; |
2982 | bool &IsTailCall = CLI.IsTailCall; |
2983 | CallingConv::ID CallConv = CLI.CallConv; |
2984 | bool IsVarArg = CLI.IsVarArg; |
2985 | bool IsSibCall = false; |
2986 | bool IsThisReturn = false; |
2987 | MachineFunction &MF = DAG.getMachineFunction(); |
2988 | |
2989 | if (Callee.isUndef() || isNullConstant(Callee)) { |
2990 | if (!CLI.IsTailCall) { |
2991 | for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I) |
2992 | InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT)); |
2993 | } |
2994 | |
2995 | return Chain; |
2996 | } |
2997 | |
2998 | if (IsVarArg) { |
2999 | return lowerUnhandledCall(CLI, InVals, |
3000 | "unsupported call to variadic function "); |
3001 | } |
3002 | |
3003 | if (!CLI.CB) |
3004 | report_fatal_error("unsupported libcall legalization"); |
3005 | |
3006 | if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { |
3007 | return lowerUnhandledCall(CLI, InVals, |
3008 | "unsupported required tail call to function "); |
3009 | } |
3010 | |
3011 | if (AMDGPU::isShader(CallConv)) { |
3012 | // Note that the issue here is the calling convention of the callee, |
3013 | // not of the call itself. |
3014 | return lowerUnhandledCall(CLI, InVals, |
3015 | "unsupported call to a shader function "); |
3016 | } |
3017 | |
3018 | if (AMDGPU::isShader(MF.getFunction().getCallingConv()) && |
3019 | CallConv != CallingConv::AMDGPU_Gfx) { |
3020 | // Only calls with specific calling conventions are allowed from shaders. |
3021 | return lowerUnhandledCall(CLI, InVals, |
3022 | "unsupported calling convention for call from " |
3023 | "graphics shader of function "); |
3024 | } |
3025 | |
3026 | if (IsTailCall) { |
3027 | IsTailCall = isEligibleForTailCallOptimization( |
3028 | Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); |
3029 | if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall()) { |
3030 | report_fatal_error("failed to perform tail call elimination on a call " |
3031 | "site marked musttail"); |
3032 | } |
3033 | |
3034 | bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; |
3035 | |
3036 | // A sibling call is one where we're under the usual C ABI and not planning |
3037 | // to change that, but can still do a tail call: |
3038 | if (!TailCallOpt && IsTailCall) |
3039 | IsSibCall = true; |
3040 | |
3041 | if (IsTailCall) |
3042 | ++NumTailCalls; |
3043 | } |
3044 | |
3045 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
3046 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; |
3047 | SmallVector<SDValue, 8> MemOpChains; |
3048 | |
3049 | // Analyze operands of the call, assigning locations to each operand. |
3050 | SmallVector<CCValAssign, 16> ArgLocs; |
3051 | CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); |
3052 | CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); |
3053 | |
3054 | if (AMDGPUTargetMachine::EnableFixedFunctionABI && |
3055 | CallConv != CallingConv::AMDGPU_Gfx) { |
3056 | // With a fixed ABI, allocate fixed registers before user arguments. |
3057 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); |
3058 | } |
3059 | |
3060 | CCInfo.AnalyzeCallOperands(Outs, AssignFn); |
3061 | |
3062 | // Get a count of how many bytes are to be pushed on the stack. |
3063 | unsigned NumBytes = CCInfo.getNextStackOffset(); |
3064 | |
3065 | if (IsSibCall) { |
3066 | // Since we're not changing the ABI to make this a tail call, the memory |
3067 | // operands are already available in the caller's incoming argument space. |
3068 | NumBytes = 0; |
3069 | } |
3070 | |
3071 | // FPDiff is the byte offset of the call's argument area from the callee's. |
3072 | // Stores to callee stack arguments will be placed in fixed stack slots offset |
3073 | // by this amount for a tail call. In a sibling call it must be 0 because the |
3074 | // caller will deallocate the entire stack and the callee still expects its |
3075 | // arguments to begin at SP+0. It is completely unused for non-tail calls. |
3076 | int32_t FPDiff = 0; |
3077 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
3078 | |
3079 | // Adjust the stack pointer for the new arguments, unless this is a sibling |
3080 | // call that reuses the caller's incoming argument area. |
3081 | if (!IsSibCall) { |
3082 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); |
3083 | |
3084 | if (!Subtarget->enableFlatScratch()) { |
3085 | SmallVector<SDValue, 4> CopyFromChains; |
3086 | |
3087 | // In the HSA case, this should be an identity copy. |
3088 | SDValue ScratchRSrcReg |
3089 | = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); |
3090 | RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); |
3091 | CopyFromChains.push_back(ScratchRSrcReg.getValue(1)); |
3092 | Chain = DAG.getTokenFactor(DL, CopyFromChains); |
3093 | } |
3094 | } |
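     | // Callees expect the scratch buffer resource descriptor in SGPR0-SGPR3 |
     | // whenever flat scratch is not in use, so it is copied and passed |
     | // explicitly above. |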
3095 | |
3096 | MVT PtrVT = MVT::i32; |
3097 | |
3098 | // Walk the register/memloc assignments, inserting copies/loads. |
3099 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { |
3100 | CCValAssign &VA = ArgLocs[i]; |
3101 | SDValue Arg = OutVals[i]; |
3102 | |
3103 | // Promote the value if needed. |
3104 | switch (VA.getLocInfo()) { |
3105 | case CCValAssign::Full: |
3106 | break; |
3107 | case CCValAssign::BCvt: |
3108 | Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); |
3109 | break; |
3110 | case CCValAssign::ZExt: |
3111 | Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); |
3112 | break; |
3113 | case CCValAssign::SExt: |
3114 | Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); |
3115 | break; |
3116 | case CCValAssign::AExt: |
3117 | Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); |
3118 | break; |
3119 | case CCValAssign::FPExt: |
3120 | Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); |
3121 | break; |
3122 | default: |
3123 | llvm_unreachable("Unknown loc info!"); |
3124 | } |
3125 | |
3126 | if (VA.isRegLoc()) { |
3127 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); |
3128 | } else { |
3129 | assert(VA.isMemLoc()); |
3130 | |
3131 | SDValue DstAddr; |
3132 | MachinePointerInfo DstInfo; |
3133 | |
3134 | unsigned LocMemOffset = VA.getLocMemOffset(); |
3135 | int32_t Offset = LocMemOffset; |
3136 | |
3137 | SDValue PtrOff = DAG.getConstant(Offset, DL, PtrVT); |
3138 | MaybeAlign Alignment; |
3139 | |
3140 | if (IsTailCall) { |
3141 | ISD::ArgFlagsTy Flags = Outs[i].Flags; |
3142 | unsigned OpSize = Flags.isByVal() ? |
3143 | Flags.getByValSize() : VA.getValVT().getStoreSize(); |
3144 | |
3145 | // Use the byval alignment when present, else the common stack alignment at this offset. |
3146 | Alignment = |
3147 | Flags.isByVal() |
3148 | ? Flags.getNonZeroByValAlign() |
3149 | : commonAlignment(Subtarget->getStackAlignment(), Offset); |
3150 | |
3151 | Offset = Offset + FPDiff; |
3152 | int FI = MFI.CreateFixedObject(OpSize, Offset, true); |
3153 | |
3154 | DstAddr = DAG.getFrameIndex(FI, PtrVT); |
3155 | DstInfo = MachinePointerInfo::getFixedStack(MF, FI); |
3156 | |
3157 | // Make sure any stack arguments overlapping with where we're storing |
3158 | // are loaded before this eventual operation. Otherwise they'll be |
3159 | // clobbered. |
3160 | |
3161 | // FIXME: Why is this really necessary? This seems to just result in a |
3162 | // lot of code to copy the stack and write it back to the same |
3163 | // locations, which are supposed to be immutable? |
3164 | Chain = addTokenForArgument(Chain, DAG, MFI, FI); |
3165 | } else { |
3166 | // Stores to the argument area are relative to the current stack pointer. |
3167 | SDValue SP = DAG.getCopyFromReg(Chain, DL, Info->getStackPtrOffsetReg(), |
3168 | MVT::i32); |
3169 | DstAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, SP, PtrOff); |
3170 | DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); |
3171 | Alignment = |
3172 | commonAlignment(Subtarget->getStackAlignment(), LocMemOffset); |
3173 | } |
3174 | |
3175 | if (Outs[i].Flags.isByVal()) { |
3176 | SDValue SizeNode = |
3177 | DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); |
3178 | SDValue Cpy = |
3179 | DAG.getMemcpy(Chain, DL, DstAddr, Arg, SizeNode, |
3180 | Outs[i].Flags.getNonZeroByValAlign(), |
3181 | /*isVol=*/false, /*AlwaysInline=*/true, |
3182 | /*isTailCall=*/false, DstInfo, |
3183 | MachinePointerInfo(AMDGPUAS::PRIVATE_ADDRESS)); |
3184 | |
3185 | MemOpChains.push_back(Cpy); |
3186 | } else { |
3187 | SDValue Store = |
3188 | DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo, Alignment); |
3189 | MemOpChains.push_back(Store); |
3190 | } |
3191 | } |
3192 | } |
3193 | |
3194 | if (!AMDGPUTargetMachine::EnableFixedFunctionABI && |
3195 | CallConv != CallingConv::AMDGPU_Gfx) { |
3196 | // Without the fixed ABI, the special inputs are passed after the user arguments. |
3197 | passSpecialInputs(CLI, CCInfo, *Info, RegsToPass, MemOpChains, Chain); |
3198 | } |
3199 | |
3200 | if (!MemOpChains.empty()) |
3201 | Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); |
3202 | |
3203 | // Build a sequence of copy-to-reg nodes chained together with token chain |
3204 | // and glue operands which copy the outgoing args into the appropriate regs. |
3205 | SDValue InFlag; |
3206 | for (auto &RegToPass : RegsToPass) { |
3207 | Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first, |
3208 | RegToPass.second, InFlag); |
3209 | InFlag = Chain.getValue(1); |
3210 | } |
3211 | |
3212 | |
3213 | SDValue PhysReturnAddrReg; |
3214 | if (IsTailCall) { |
3215 | // Since the return is being combined with the call, we need to pass the |
3216 | // return address on. |
3217 | |
3218 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
3219 | SDValue ReturnAddrReg = CreateLiveInRegister( |
3220 | DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); |
3221 | |
3222 | PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), |
3223 | MVT::i64); |
3224 | Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag); |
3225 | InFlag = Chain.getValue(1); |
3226 | } |
3227 | |
3228 | // We don't usually want to end the call-sequence here because we would tidy |
3229 | // the frame up *after* the call. However, in the ABI-changing tail-call case |
3230 | // we've carefully laid out the parameters so that when SP is reset, they'll |
3231 | // be in the correct location. |
3232 | if (IsTailCall && !IsSibCall) { |
3233 | Chain = DAG.getCALLSEQ_END(Chain, |
3234 | DAG.getTargetConstant(NumBytes, DL, MVT::i32), |
3235 | DAG.getTargetConstant(0, DL, MVT::i32), |
3236 | InFlag, DL); |
3237 | InFlag = Chain.getValue(1); |
3238 | } |
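     | // For a true (non-sibling) tail call, the call sequence ends here, |
     | // before the TC_RETURN emitted below; no code runs after the jump. |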
3239 | |
3240 | std::vector<SDValue> Ops; |
3241 | Ops.push_back(Chain); |
3242 | Ops.push_back(Callee); |
3243 | // Add a redundant copy of the callee global, which should not be legalized, |
3244 | // as we need direct access to the callee later. |
3245 | if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) { |
3246 | const GlobalValue *GV = GSD->getGlobal(); |
3247 | Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64)); |
3248 | } else { |
3249 | Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64)); |
3250 | } |
3251 | |
3252 | if (IsTailCall) { |
3253 | // Each tail call may have to adjust the stack by a different amount, so |
3254 | // this information must travel along with the operation for eventual |
3255 | // consideration by the epilogue. |
3256 | Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32)); |
3257 | |
3258 | Ops.push_back(PhysReturnAddrReg); |
3259 | } |
3260 | |
3261 | // Add argument registers to the end of the list so that they are known live |
3262 | // into the call. |
3263 | for (auto &RegToPass : RegsToPass) { |
3264 | Ops.push_back(DAG.getRegister(RegToPass.first, |
3265 | RegToPass.second.getValueType())); |
3266 | } |
3267 | |
3268 | |
3269 | // Add a register mask operand representing the call-preserved registers. |
3270 | auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo()); |
3271 | const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); |
3272 | assert(Mask && "Missing call preserved mask for calling convention"); |
3273 | Ops.push_back(DAG.getRegisterMask(Mask)); |
3274 | |
3275 | if (InFlag.getNode()) |
3276 | Ops.push_back(InFlag); |
3277 | |
3278 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
3279 | |
3280 | // If we're doing a tail call, use a TC_RETURN here rather than an |
3281 | // actual call instruction. |
3282 | if (IsTailCall) { |
3283 | MFI.setHasTailCall(); |
3284 | return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops); |
3285 | } |
3286 | |
3287 | // Returns a chain and a glue value for the return-value copies to use. |
3288 | SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); |
3289 | Chain = Call.getValue(0); |
3290 | InFlag = Call.getValue(1); |
3291 | |
3292 | uint64_t CalleePopBytes = NumBytes; |
3293 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), |
3294 | DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), |
3295 | InFlag, DL); |
3296 | if (!Ins.empty()) |
3297 | InFlag = Chain.getValue(1); |
3298 | |
3299 | // Handle result values, copying them out of physregs into vregs that we |
3300 | // return. |
3301 | return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, |
3302 | InVals, IsThisReturn, |
3303 | IsThisReturn ? OutVals[0] : SDValue()); |
3304 | } |
3305 | |
3306 | // This is identical to the default implementation in ExpandDYNAMIC_STACKALLOC, |
3307 | // except for applying the wave size scale to the increment amount. |
3308 | SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl( |
3309 | SDValue Op, SelectionDAG &DAG) const { |
3310 | const MachineFunction &MF = DAG.getMachineFunction(); |
3311 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
3312 | |
3313 | SDLoc dl(Op); |
3314 | EVT VT = Op.getValueType(); |
3315 | SDValue Tmp1 = Op; |
3316 | SDValue Tmp2 = Op.getValue(1); |
3317 | SDValue Tmp3 = Op.getOperand(2); |
3318 | SDValue Chain = Tmp1.getOperand(0); |
3319 | |
3320 | Register SPReg = Info->getStackPtrOffsetReg(); |
3321 | |
3322 | // Chain the dynamic stack allocation so that it doesn't modify the stack |
3323 | // pointer when other instructions are using the stack. |
3324 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); |
3325 | |
3326 | SDValue Size = Tmp2.getOperand(1); |
3327 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); |
3328 | Chain = SP.getValue(1); |
3329 | MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue(); |
3330 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
3331 | const TargetFrameLowering *TFL = ST.getFrameLowering(); |
3332 | unsigned Opc = |
3333 | TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ? |
3334 | ISD::ADD : ISD::SUB; |
3335 | |
3336 | SDValue ScaledSize = DAG.getNode( |
3337 | ISD::SHL, dl, VT, Size, |
3338 | DAG.getConstant(ST.getWavefrontSizeLog2(), dl, MVT::i32)); |
3339 | |
3340 | Align StackAlign = TFL->getStackAlign(); |
3341 | Tmp1 = DAG.getNode(Opc, dl, VT, SP, ScaledSize); |
3342 | if (Alignment && *Alignment > StackAlign) { |
3343 | Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1, |
3344 | DAG.getConstant(-(uint64_t)Alignment->value() |
3345 | << ST.getWavefrontSizeLog2(), |
3346 | dl, VT)); |
3347 | } |
3348 | |
3349 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); |
3350 | Tmp2 = DAG.getCALLSEQ_END( |
3351 | Chain, DAG.getIntPtrConstant(0, dl, true), |
3352 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); |
3353 | |
3354 | return DAG.getMergeValues({Tmp1, Tmp2}, dl); |
3355 | } |
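     | // Note the requested size is shifted left by log2(wavefront size): the |
     | // stack pointer tracks per-wave scratch, while the IR alloca size is |
     | // per-lane, as is the alignment mask applied above. |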
3356 | |
3357 | SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, |
3358 | SelectionDAG &DAG) const { |
3359 | // We only handle constant sizes here to allow non-entry-block, statically |
3360 | // sized allocas. A truly dynamic value is more difficult to support |
3361 | // because we don't know if the size value is uniform. |
3362 | |
3363 | |
3364 | SDValue Size = Op.getOperand(1); |
3365 | if (isa<ConstantSDNode>(Size)) |
3366 | return lowerDYNAMIC_STACKALLOCImpl(Op, DAG); |
3367 | |
3368 | return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG); |
3369 | } |
3370 | |
3371 | Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT, |
3372 | const MachineFunction &MF) const { |
3373 | Register Reg = StringSwitch<Register>(RegName) |
3374 | .Case("m0", AMDGPU::M0) |
3375 | .Case("exec", AMDGPU::EXEC) |
3376 | .Case("exec_lo", AMDGPU::EXEC_LO) |
3377 | .Case("exec_hi", AMDGPU::EXEC_HI) |
3378 | .Case("flat_scratch", AMDGPU::FLAT_SCR) |
3379 | .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) |
3380 | .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) |
3381 | .Default(Register()); |
3382 | |
3383 | if (Reg == AMDGPU::NoRegister) { |
3384 | report_fatal_error(Twine("invalid register name \"" |
3385 | + StringRef(RegName) + "\".")); |
3386 | |
3387 | } |
3388 | |
3389 | if (!Subtarget->hasFlatScrRegister() && |
3390 | Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { |
3391 | report_fatal_error(Twine("invalid register \"" |
3392 | + StringRef(RegName) + "\" for subtarget.")); |
3393 | } |
3394 | |
3395 | switch (Reg) { |
3396 | case AMDGPU::M0: |
3397 | case AMDGPU::EXEC_LO: |
3398 | case AMDGPU::EXEC_HI: |
3399 | case AMDGPU::FLAT_SCR_LO: |
3400 | case AMDGPU::FLAT_SCR_HI: |
3401 | if (VT.getSizeInBits() == 32) |
3402 | return Reg; |
3403 | break; |
3404 | case AMDGPU::EXEC: |
3405 | case AMDGPU::FLAT_SCR: |
3406 | if (VT.getSizeInBits() == 64) |
3407 | return Reg; |
3408 | break; |
3409 | default: |
3410 | llvm_unreachable("missing register type checking"); |
3411 | } |
3412 | |
3413 | report_fatal_error(Twine("invalid type for register \"" |
3414 | + StringRef(RegName) + "\".")); |
3415 | } |
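     | // Reads of these named registers (e.g. via llvm.read_register) must use a |
     | // type whose width matches the register: 32 bits for m0, exec_lo, exec_hi, |
     | // flat_scratch_lo and flat_scratch_hi; 64 bits for exec and flat_scratch. |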
3416 | |
3417 | // If the kill is not the last instruction, split the block so that the kill |
3418 | // is always a terminator. |
3419 | MachineBasicBlock * |
3420 | SITargetLowering::splitKillBlock(MachineInstr &MI, |
3421 | MachineBasicBlock *BB) const { |
3422 | MachineBasicBlock *SplitBB = BB->splitAt(MI, /*UpdateLiveIns=*/false); |
3423 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3424 | MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); |
3425 | return SplitBB; |
3426 | } |
3427 | |
3428 | // Split block \p MBB at \p MI, so as to insert a loop. If \p InstInLoop is |
3429 | // true, \p MI will be the only instruction in the loop body block. |
3430 | // Otherwise, it will be the first instruction in the remainder block. |
3431 | // |
3432 | // \returns { LoopBody, Remainder } |
3433 | static std::pair<MachineBasicBlock *, MachineBasicBlock *> |
3434 | splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { |
3435 | MachineFunction *MF = MBB.getParent(); |
3436 | MachineBasicBlock::iterator I(&MI); |
3437 | |
3438 | // To insert the loop we need to split the block. Move everything after this |
3439 | // point to a new block, and insert a new empty block between the two. |
3440 | MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); |
3441 | MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); |
3442 | MachineFunction::iterator MBBI(MBB); |
3443 | ++MBBI; |
3444 | |
3445 | MF->insert(MBBI, LoopBB); |
3446 | MF->insert(MBBI, RemainderBB); |
3447 | |
3448 | LoopBB->addSuccessor(LoopBB); |
3449 | LoopBB->addSuccessor(RemainderBB); |
3450 | |
3451 | // The remainder block inherits MBB's successors. |
3452 | RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); |
3453 | |
3454 | if (InstInLoop) { |
3455 | auto Next = std::next(I); |
3456 | |
3457 | // Move the instruction into the loop body. |
3458 | LoopBB->splice(LoopBB->begin(), &MBB, I, Next); |
3459 | |
3460 | // Move the rest of the block into the remainder block. |
3461 | RemainderBB->splice(RemainderBB->begin(), &MBB, Next, MBB.end()); |
3462 | } else { |
3463 | RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); |
3464 | } |
3465 | |
3466 | MBB.addSuccessor(LoopBB); |
3467 | |
3468 | return std::make_pair(LoopBB, RemainderBB); |
3469 | } |
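     | // The resulting CFG is MBB -> LoopBB, with LoopBB looping back to itself |
     | // and falling through to RemainderBB once the loop condition fails. |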
3470 | |
3471 | // Insert \p MI into a BUNDLE with an S_WAITCNT 0 immediately following it. |
3472 | void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { |
3473 | MachineBasicBlock *MBB = MI.getParent(); |
3474 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3475 | auto I = MI.getIterator(); |
3476 | auto E = std::next(I); |
3477 | |
3478 | BuildMI(*MBB, E, MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT)) |
3479 | .addImm(0); |
3480 | |
3481 | MIBundleBuilder Bundler(*MBB, I, E); |
3482 | finalizeBundle(*MBB, Bundler.begin()); |
3483 | } |
3484 | |
3485 | MachineBasicBlock * |
3486 | SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI, |
3487 | MachineBasicBlock *BB) const { |
3488 | const DebugLoc &DL = MI.getDebugLoc(); |
3489 | |
3490 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3491 | |
3492 | MachineBasicBlock *LoopBB; |
3493 | MachineBasicBlock *RemainderBB; |
3494 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3495 | |
3496 | // Clear the kill flag on data0; its def may end up in a different block after splitting. |
3497 | if (MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::data0)) |
3498 | Src->setIsKill(false); |
3499 | |
3500 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, *BB, true); |
3501 | |
3502 | MachineBasicBlock::iterator I = LoopBB->end(); |
3503 | |
3504 | const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg( |
3505 | AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1); |
3506 | |
3507 | // Clear TRAP_STS.MEM_VIOL before issuing the GWS operation. |
3508 | BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32)) |
3509 | .addImm(0) |
3510 | .addImm(EncodedReg); |
3511 | |
3512 | bundleInstWithWaitcnt(MI); |
3513 | |
3514 | Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
3515 | |
3516 | // Load and check TRAP_STS.MEM_VIOL. |
3517 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_GETREG_B32), Reg) |
3518 | .addImm(EncodedReg); |
3519 | |
3520 | // Loop back while MEM_VIOL is nonzero, i.e. until the GWS operation succeeds. |
3521 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CMP_LG_U32)) |
3522 | .addReg(Reg, RegState::Kill) |
3523 | .addImm(0); |
3524 | BuildMI(*LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
3525 | .addMBB(LoopBB); |
3526 | |
3527 | return RemainderBB; |
3528 | } |
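     | // The GWS instruction was spliced into LoopBB above, so the loop re-issues |
     | // the bundled operation until TRAP_STS.MEM_VIOL reads back zero, i.e. |
     | // until it completes without a memory violation. |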
3529 | |
3530 | |
3531 | // Emit the body of a waterfall loop: read the index of the first active |
3532 | // lane with V_READFIRSTLANE_B32, select every lane with a matching index, |
3533 | // and run the indexed access for just those lanes before looping again. |
3534 | |
3535 | static MachineBasicBlock::iterator |
3536 | emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI, |
3537 | MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, |
3538 | const DebugLoc &DL, const MachineOperand &Idx, |
3539 | unsigned InitReg, unsigned ResultReg, unsigned PhiReg, |
3540 | unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode, |
3541 | Register &SGPRIdxReg) { |
3542 | |
3543 | MachineFunction *MF = OrigBB.getParent(); |
3544 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3545 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3546 | MachineBasicBlock::iterator I = LoopBB.begin(); |
3547 | |
3548 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
3549 | Register PhiExec = MRI.createVirtualRegister(BoolRC); |
3550 | Register NewExec = MRI.createVirtualRegister(BoolRC); |
3551 | Register CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
3552 | Register CondReg = MRI.createVirtualRegister(BoolRC); |
3553 | |
3554 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) |
3555 | .addReg(InitReg) |
3556 | .addMBB(&OrigBB) |
3557 | .addReg(ResultReg) |
3558 | .addMBB(&LoopBB); |
3559 | |
3560 | BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) |
3561 | .addReg(InitSaveExecReg) |
3562 | .addMBB(&OrigBB) |
3563 | .addReg(NewExec) |
3564 | .addMBB(&LoopBB); |
3565 | |
3566 | // Read the index of the first active lane. |
3567 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) |
3568 | .addReg(Idx.getReg(), getUndefRegState(Idx.isUndef())); |
3569 | |
3570 | // Compare the just-read value against the index of every lane. |
3571 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) |
3572 | .addReg(CurrentIdxReg) |
3573 | .addReg(Idx.getReg(), 0, Idx.getSubReg()); |
3574 | |
3575 | // Update EXEC, saving the original EXEC value. |
3576 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 |
3577 | : AMDGPU::S_AND_SAVEEXEC_B64), |
3578 | NewExec) |
3579 | .addReg(CondReg, RegState::Kill); |
3580 | |
3581 | MRI.setSimpleHint(NewExec, CondReg); |
3582 | |
3583 | if (UseGPRIdxMode) { |
3584 | if (Offset == 0) { |
3585 | SGPRIdxReg = CurrentIdxReg; |
3586 | } else { |
3587 | SGPRIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); |
3588 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), SGPRIdxReg) |
3589 | .addReg(CurrentIdxReg, RegState::Kill) |
3590 | .addImm(Offset); |
3591 | } |
3592 | } else { |
3593 | // Move the index (plus any offset) into M0 for the indirect access. |
3594 | if (Offset == 0) { |
3595 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
3596 | .addReg(CurrentIdxReg, RegState::Kill); |
3597 | } else { |
3598 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
3599 | .addReg(CurrentIdxReg, RegState::Kill) |
3600 | .addImm(Offset); |
3601 | } |
3602 | } |
3603 | |
3604 | // Update EXEC, switching the lanes handled this iteration to inactive. |
3605 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
3606 | MachineInstr *InsertPt = |
3607 | BuildMI(LoopBB, I, DL, TII->get(ST.isWave32() ? AMDGPU::S_XOR_B32_term |
3608 | : AMDGPU::S_XOR_B64_term), Exec) |
3609 | .addReg(Exec) |
3610 | .addReg(NewExec); |
3611 | |
3612 | |
3613 | |
3614 | |
3615 | // Loop back while there are still lanes left to process. |
3616 | BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) |
3617 | .addMBB(&LoopBB); |
3618 | |
3619 | return InsertPt->getIterator(); |
3620 | } |
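     | // The returned iterator points at the exec-mask update, so callers can |
     | // insert the actual indexed operation inside the loop body, while EXEC is |
     | // still restricted to the lanes whose index matches M0. |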
3621 | |
3622 | |
3623 | // Wrap \p MI in a waterfall loop that transfers a possibly divergent VGPR |
3624 | // index into M0 (or an SGPR index register in GPR-index mode), saving and |
3625 | // restoring EXEC around the loop. |
3626 | |
3627 | static MachineBasicBlock::iterator |
3628 | loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI, |
3629 | unsigned InitResultReg, unsigned PhiReg, int Offset, |
3630 | bool UseGPRIdxMode, Register &SGPRIdxReg) { |
3631 | MachineFunction *MF = MBB.getParent(); |
3632 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3633 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3634 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3635 | const DebugLoc &DL = MI.getDebugLoc(); |
3636 | MachineBasicBlock::iterator I(&MI); |
3637 | |
3638 | const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
3639 | Register DstReg = MI.getOperand(0).getReg(); |
3640 | Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); |
3641 | Register TmpExec = MRI.createVirtualRegister(BoolXExecRC); |
3642 | unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; |
3643 | unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
3644 | |
3645 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); |
3646 | |
3647 | // Save the EXEC mask so it can be restored after the loop. |
3648 | BuildMI(MBB, I, DL, TII->get(MovExecOpc), SaveExec) |
3649 | .addReg(Exec); |
3650 | |
3651 | MachineBasicBlock *LoopBB; |
3652 | MachineBasicBlock *RemainderBB; |
3653 | std::tie(LoopBB, RemainderBB) = splitBlockForLoop(MI, MBB, false); |
3654 | |
3655 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3656 | |
3657 | auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, |
3658 | InitResultReg, DstReg, PhiReg, TmpExec, |
3659 | Offset, UseGPRIdxMode, SGPRIdxReg); |
3660 | |
3661 | MachineBasicBlock* LandingPad = MF->CreateMachineBasicBlock(); |
3662 | MachineFunction::iterator MBBI(LoopBB); |
3663 | ++MBBI; |
3664 | MF->insert(MBBI, LandingPad); |
3665 | LoopBB->removeSuccessor(RemainderBB); |
3666 | LandingPad->addSuccessor(RemainderBB); |
3667 | LoopBB->addSuccessor(LandingPad); |
3668 | MachineBasicBlock::iterator First = LandingPad->begin(); |
3669 | BuildMI(*LandingPad, First, DL, TII->get(MovExecOpc), Exec) |
3670 | .addReg(SaveExec); |
3671 | |
3672 | return InsPt; |
3673 | } |
3674 | |
3675 | // Return the subregister index for a constant offset into the vector, plus any residual offset. |
3676 | static std::pair<unsigned, int> |
3677 | computeIndirectRegAndOffset(const SIRegisterInfo &TRI, |
3678 | const TargetRegisterClass *SuperRC, |
3679 | unsigned VecReg, |
3680 | int Offset) { |
3681 | int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; |
3682 | |
3683 | // Skip out-of-bounds offsets, or else we would end up using an undefined |
3684 | // register. |
3685 | if (Offset >= NumElts || Offset < 0) |
3686 | return std::make_pair(AMDGPU::sub0, Offset); |
3687 | |
3688 | return std::make_pair(SIRegisterInfo::getSubRegFromChannel(Offset), 0); |
3689 | } |
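     | // A constant in-range offset selects the corresponding 32-bit subregister |
     | // directly; out-of-range (or negative) offsets fall back to sub0 and keep |
     | // the offset for the runtime index computation. |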
3690 | |
3691 | static void setM0ToIndexFromSGPR(const SIInstrInfo *TII, |
3692 | MachineRegisterInfo &MRI, MachineInstr &MI, |
3693 | int Offset) { |
3694 | MachineBasicBlock *MBB = MI.getParent(); |
3695 | const DebugLoc &DL = MI.getDebugLoc(); |
3696 | MachineBasicBlock::iterator I(&MI); |
3697 | |
3698 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3699 | |
3700 | assert(Idx->getReg() != AMDGPU::NoRegister); |
3701 | |
3702 | if (Offset == 0) { |
3703 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx); |
3704 | } else { |
3705 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) |
3706 | .add(*Idx) |
3707 | .addImm(Offset); |
3708 | } |
3709 | } |
3710 | |
3711 | static Register getIndirectSGPRIdx(const SIInstrInfo *TII, |
3712 | MachineRegisterInfo &MRI, MachineInstr &MI, |
3713 | int Offset) { |
3714 | MachineBasicBlock *MBB = MI.getParent(); |
3715 | const DebugLoc &DL = MI.getDebugLoc(); |
3716 | MachineBasicBlock::iterator I(&MI); |
3717 | |
3718 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3719 | |
3720 | if (Offset == 0) |
3721 | return Idx->getReg(); |
3722 | |
3723 | Register Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); |
3724 | BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) |
3725 | .add(*Idx) |
3726 | .addImm(Offset); |
3727 | return Tmp; |
3728 | } |
3729 | |
3730 | static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, |
3731 | MachineBasicBlock &MBB, |
3732 | const GCNSubtarget &ST) { |
3733 | const SIInstrInfo *TII = ST.getInstrInfo(); |
3734 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
3735 | MachineFunction *MF = MBB.getParent(); |
3736 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3737 | |
3738 | Register Dst = MI.getOperand(0).getReg(); |
3739 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3740 | Register SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); |
3741 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
3742 | |
3743 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); |
3744 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
3745 | |
3746 | unsigned SubReg; |
3747 | std::tie(SubReg, Offset) |
3748 | = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); |
3749 | |
3750 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
3751 | |
3752 | // Check for a uniform (SGPR) index, which needs no loop. |
3753 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { |
3754 | MachineBasicBlock::iterator I(&MI); |
3755 | const DebugLoc &DL = MI.getDebugLoc(); |
3756 | |
3757 | if (UseGPRIdxMode) { |
3758 | |
3759 | // In GPR-index mode the index is materialized in an SGPR and passed |
3760 | // directly to the indirect-read pseudo. |
3761 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); |
3762 | |
3763 | const MCInstrDesc &GPRIDXDesc = |
3764 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), /*IsIndirectSrc=*/true); |
3765 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) |
3766 | .addReg(SrcReg) |
3767 | .addReg(Idx) |
3768 | .addImm(SubReg); |
3769 | } else { |
3770 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); |
3771 | |
3772 | BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
3773 | .addReg(SrcReg, 0, SubReg) |
3774 | .addReg(SrcReg, RegState::Implicit); |
3775 | } |
3776 | |
3777 | MI.eraseFromParent(); |
3778 | |
3779 | return &MBB; |
3780 | } |
3781 | |
3782 | // The index is divergent, so control flow must be inserted. |
3783 | const DebugLoc &DL = MI.getDebugLoc(); |
3784 | MachineBasicBlock::iterator I(&MI); |
3785 | |
3786 | Register PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3787 | Register InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3788 | |
3789 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); |
3790 | |
3791 | Register SGPRIdxReg; |
3792 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, |
3793 | UseGPRIdxMode, SGPRIdxReg); |
3794 | |
3795 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
3796 | |
3797 | if (UseGPRIdxMode) { |
3798 | const MCInstrDesc &GPRIDXDesc = |
3799 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), /*IsIndirectSrc=*/true); |
3800 | |
3801 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) |
3802 | .addReg(SrcReg) |
3803 | .addReg(SGPRIdxReg) |
3804 | .addImm(SubReg); |
3805 | } else { |
3806 | BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) |
3807 | .addReg(SrcReg, 0, SubReg) |
3808 | .addReg(SrcReg, RegState::Implicit); |
3809 | } |
3810 | |
3811 | MI.eraseFromParent(); |
3812 | |
3813 | return LoopBB; |
3814 | } |
3815 | |
3816 | static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, |
3817 | MachineBasicBlock &MBB, |
3818 | const GCNSubtarget &ST) { |
3819 | const SIInstrInfo *TII = ST.getInstrInfo(); |
3820 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
3821 | MachineFunction *MF = MBB.getParent(); |
3822 | MachineRegisterInfo &MRI = MF->getRegInfo(); |
3823 | |
3824 | Register Dst = MI.getOperand(0).getReg(); |
3825 | const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); |
3826 | const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); |
3827 | const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); |
3828 | int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); |
3829 | const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); |
3830 | const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); |
3831 | |
3832 | // The value can be an immediate; it will be folded later. |
3833 | assert(Val->getReg()); |
3834 | |
3835 | unsigned SubReg; |
3836 | std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, |
3837 | SrcVec->getReg(), |
3838 | Offset); |
3839 | const bool UseGPRIdxMode = ST.useVGPRIndexMode(); |
3840 | |
3841 | if (Idx->getReg() == AMDGPU::NoRegister) { |
3842 | MachineBasicBlock::iterator I(&MI); |
3843 | const DebugLoc &DL = MI.getDebugLoc(); |
3844 | |
3845 | assert(Offset == 0); |
3846 | |
3847 | BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst) |
3848 | .add(*SrcVec) |
3849 | .add(*Val) |
3850 | .addImm(SubReg); |
3851 | |
3852 | MI.eraseFromParent(); |
3853 | return &MBB; |
3854 | } |
3855 | |
3856 | // Check for a uniform (SGPR) index, which needs no loop. |
3857 | if (TII->getRegisterInfo().isSGPRClass(IdxRC)) { |
3858 | MachineBasicBlock::iterator I(&MI); |
3859 | const DebugLoc &DL = MI.getDebugLoc(); |
3860 | |
3861 | if (UseGPRIdxMode) { |
3862 | Register Idx = getIndirectSGPRIdx(TII, MRI, MI, Offset); |
3863 | |
3864 | const MCInstrDesc &GPRIDXDesc = |
3865 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), /*IsIndirectSrc=*/false); |
3866 | BuildMI(MBB, I, DL, GPRIDXDesc, Dst) |
3867 | .addReg(SrcVec->getReg()) |
3868 | .add(*Val) |
3869 | .addReg(Idx) |
3870 | .addImm(SubReg); |
3871 | } else { |
3872 | setM0ToIndexFromSGPR(TII, MRI, MI, Offset); |
3873 | |
3874 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( |
3875 | TRI.getRegSizeInBits(*VecRC), 32, false); |
3876 | BuildMI(MBB, I, DL, MovRelDesc, Dst) |
3877 | .addReg(SrcVec->getReg()) |
3878 | .add(*Val) |
3879 | .addImm(SubReg); |
3880 | } |
3881 | MI.eraseFromParent(); |
3882 | return &MBB; |
3883 | } |
3884 | |
3885 | // The value is used in every loop iteration, so clear any kill flag on it. |
3886 | if (Val->isReg()) |
3887 | MRI.clearKillFlags(Val->getReg()); |
3888 | |
3889 | const DebugLoc &DL = MI.getDebugLoc(); |
3890 | |
3891 | Register PhiReg = MRI.createVirtualRegister(VecRC); |
3892 | |
3893 | Register SGPRIdxReg; |
3894 | auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg, Offset, |
3895 | UseGPRIdxMode, SGPRIdxReg); |
3896 | MachineBasicBlock *LoopBB = InsPt->getParent(); |
3897 | |
3898 | if (UseGPRIdxMode) { |
3899 | const MCInstrDesc &GPRIDXDesc = |
3900 | TII->getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), /*IsIndirectSrc=*/false); |
3901 | |
3902 | BuildMI(*LoopBB, InsPt, DL, GPRIDXDesc, Dst) |
3903 | .addReg(PhiReg) |
3904 | .add(*Val) |
3905 | .addReg(SGPRIdxReg) |
3906 | .addImm(AMDGPU::sub0); |
3907 | } else { |
3908 | const MCInstrDesc &MovRelDesc = TII->getIndirectRegWriteMovRelPseudo( |
3909 | TRI.getRegSizeInBits(*VecRC), 32, false); |
3910 | BuildMI(*LoopBB, InsPt, DL, MovRelDesc, Dst) |
3911 | .addReg(PhiReg) |
3912 | .add(*Val) |
3913 | .addImm(AMDGPU::sub0); |
3914 | } |
3915 | |
3916 | MI.eraseFromParent(); |
3917 | return LoopBB; |
3918 | } |
3919 | |
3920 | MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( |
3921 | MachineInstr &MI, MachineBasicBlock *BB) const { |
3922 | |
3923 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
3924 | MachineFunction *MF = BB->getParent(); |
3925 | SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
3926 | |
3927 | switch (MI.getOpcode()) { |
3928 | case AMDGPU::S_UADDO_PSEUDO: |
3929 | case AMDGPU::S_USUBO_PSEUDO: { |
3930 | const DebugLoc &DL = MI.getDebugLoc(); |
3931 | MachineOperand &Dest0 = MI.getOperand(0); |
3932 | MachineOperand &Dest1 = MI.getOperand(1); |
3933 | MachineOperand &Src0 = MI.getOperand(2); |
3934 | MachineOperand &Src1 = MI.getOperand(3); |
3935 | |
3936 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_UADDO_PSEUDO) |
3937 | ? AMDGPU::S_ADD_I32 |
3938 | : AMDGPU::S_SUB_I32; |
3939 | BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1); |
3940 | |
3941 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg()) |
3942 | .addImm(1) |
3943 | .addImm(0); |
3944 | |
3945 | MI.eraseFromParent(); |
3946 | return BB; |
3947 | } |
3948 | case AMDGPU::S_ADD_U64_PSEUDO: |
3949 | case AMDGPU::S_SUB_U64_PSEUDO: { |
3950 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3951 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3952 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3953 | const TargetRegisterClass *BoolRC = TRI->getBoolRC(); |
3954 | const DebugLoc &DL = MI.getDebugLoc(); |
3955 | |
3956 | MachineOperand &Dest = MI.getOperand(0); |
3957 | MachineOperand &Src0 = MI.getOperand(1); |
3958 | MachineOperand &Src1 = MI.getOperand(2); |
3959 | |
3960 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
3961 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
3962 | |
3963 | MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm( |
3964 | MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); |
3965 | MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm( |
3966 | MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); |
3967 | |
3968 | MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm( |
3969 | MI, MRI, Src1, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass); |
3970 | MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm( |
3971 | MI, MRI, Src1, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass); |
3972 | |
3973 | bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); |
3974 | |
3975 | unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; |
3976 | unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; |
3977 | BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0).add(Src0Sub0).add(Src1Sub0); |
3978 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1).add(Src0Sub1).add(Src1Sub1); |
3979 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) |
3980 | .addReg(DestSub0) |
3981 | .addImm(AMDGPU::sub0) |
3982 | .addReg(DestSub1) |
3983 | .addImm(AMDGPU::sub1); |
3984 | MI.eraseFromParent(); |
3985 | return BB; |
3986 | } |
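     | // The 64-bit scalar add/sub above is expanded into a 32-bit low half |
     | // (S_ADD_U32/S_SUB_U32) and a high half consuming the carry in SCC |
     | // (S_ADDC_U32/S_SUBB_U32), then recombined with REG_SEQUENCE. |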
3987 | case AMDGPU::V_ADD_U64_PSEUDO: |
3988 | case AMDGPU::V_SUB_U64_PSEUDO: { |
3989 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
3990 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
3991 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
3992 | const DebugLoc &DL = MI.getDebugLoc(); |
3993 | |
3994 | bool IsAdd = (MI.getOpcode() == AMDGPU::V_ADD_U64_PSEUDO); |
3995 | |
3996 | const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
3997 | |
3998 | Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
3999 | Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
4000 | |
4001 | Register CarryReg = MRI.createVirtualRegister(CarryRC); |
4002 | Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); |
4003 | |
4004 | MachineOperand &Dest = MI.getOperand(0); |
4005 | MachineOperand &Src0 = MI.getOperand(1); |
4006 | MachineOperand &Src1 = MI.getOperand(2); |
4007 | |
4008 | const TargetRegisterClass *Src0RC = Src0.isReg() |
4009 | ? MRI.getRegClass(Src0.getReg()) |
4010 | : &AMDGPU::VReg_64RegClass; |
4011 | const TargetRegisterClass *Src1RC = Src1.isReg() |
4012 | ? MRI.getRegClass(Src1.getReg()) |
4013 | : &AMDGPU::VReg_64RegClass; |
4014 | |
4015 | const TargetRegisterClass *Src0SubRC = |
4016 | TRI->getSubRegClass(Src0RC, AMDGPU::sub0); |
4017 | const TargetRegisterClass *Src1SubRC = |
4018 | TRI->getSubRegClass(Src1RC, AMDGPU::sub1); |
4019 | |
4020 | MachineOperand SrcReg0Sub0 = TII->buildExtractSubRegOrImm( |
4021 | MI, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); |
4022 | MachineOperand SrcReg1Sub0 = TII->buildExtractSubRegOrImm( |
4023 | MI, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); |
4024 | |
4025 | MachineOperand SrcReg0Sub1 = TII->buildExtractSubRegOrImm( |
4026 | MI, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); |
4027 | MachineOperand SrcReg1Sub1 = TII->buildExtractSubRegOrImm( |
4028 | MI, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); |
4029 | |
4030 | unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; |
4031 | MachineInstr *LoHalf = BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) |
4032 | .addReg(CarryReg, RegState::Define) |
4033 | .add(SrcReg0Sub0) |
4034 | .add(SrcReg1Sub0) |
4035 | .addImm(0); |
4036 | |
4037 | unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; |
4038 | MachineInstr *HiHalf = |
4039 | BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) |
4040 | .addReg(DeadCarryReg, RegState::Define | RegState::Dead) |
4041 | .add(SrcReg0Sub1) |
4042 | .add(SrcReg1Sub1) |
4043 | .addReg(CarryReg, RegState::Kill) |
4044 | .addImm(0); |
4045 | |
4046 | BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) |
4047 | .addReg(DestSub0) |
4048 | .addImm(AMDGPU::sub0) |
4049 | .addReg(DestSub1) |
4050 | .addImm(AMDGPU::sub1); |
4051 | TII->legalizeOperands(*LoHalf); |
4052 | TII->legalizeOperands(*HiHalf); |
4053 | MI.eraseFromParent(); |
4054 | return BB; |
4055 | } |
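     | // Same expansion as the scalar case above, but on the VALU: the carry is |
     | // produced into a lane-mask register by V_ADD_CO_U32 and consumed by |
     | // V_ADDC_U32 (or the SUB equivalents). |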
4056 | case AMDGPU::S_ADD_CO_PSEUDO: |
4057 | case AMDGPU::S_SUB_CO_PSEUDO: { |
4058 | // This pseudo is only ever selected from a uniform add/subcarry node, so |
4059 | // any VGPR operands are assumed to be splat values and are read back to |
4060 | // SGPRs with V_READFIRSTLANE_B32 below. |
4061 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
4062 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
4063 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
4064 | MachineBasicBlock::iterator MII = MI; |
4065 | const DebugLoc &DL = MI.getDebugLoc(); |
4066 | MachineOperand &Dest = MI.getOperand(0); |
4067 | MachineOperand &CarryDest = MI.getOperand(1); |
4068 | MachineOperand &Src0 = MI.getOperand(2); |
4069 | MachineOperand &Src1 = MI.getOperand(3); |
4070 | MachineOperand &Src2 = MI.getOperand(4); |
4071 | unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) |
4072 | ? AMDGPU::S_ADDC_U32 |
4073 | : AMDGPU::S_SUBB_U32; |
4074 | if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) { |
4075 | Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4076 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0) |
4077 | .addReg(Src0.getReg()); |
4078 | Src0.setReg(RegOp0); |
4079 | } |
4080 | if (Src1.isReg() && TRI->isVectorRegister(MRI, Src1.getReg())) { |
4081 | Register RegOp1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4082 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp1) |
4083 | .addReg(Src1.getReg()); |
4084 | Src1.setReg(RegOp1); |
4085 | } |
4086 | Register RegOp2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4087 | if (TRI->isVectorRegister(MRI, Src2.getReg())) { |
4088 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp2) |
4089 | .addReg(Src2.getReg()); |
4090 | Src2.setReg(RegOp2); |
4091 | } |
4092 | |
4093 | const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg()); |
4094 | if (TRI->getRegSizeInBits(*Src2RC) == 64) { |
4095 | if (ST.hasScalarCompareEq64()) { |
4096 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64)) |
4097 | .addReg(Src2.getReg()) |
4098 | .addImm(0); |
4099 | } else { |
4100 | const TargetRegisterClass *SubRC = |
4101 | TRI->getSubRegClass(Src2RC, AMDGPU::sub0); |
4102 | MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm( |
4103 | MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC); |
4104 | MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm( |
4105 | MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC); |
4106 | Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); |
4107 | |
4108 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32) |
4109 | .add(Src2Sub0) |
4110 | .add(Src2Sub1); |
4111 | |
4112 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32)) |
4113 | .addReg(Src2_32, RegState::Kill) |
4114 | .addImm(0); |
4115 | } |
4116 | } else { |
4117 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMPK_LG_U32)) |
4118 | .addReg(Src2.getReg()) |
4119 | .addImm(0); |
4120 | } |
4121 | |
4122 | BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1); |
4123 | |
4124 | BuildMI(*BB, MII, DL, TII->get(AMDGPU::COPY), CarryDest.getReg()) |
4125 | .addReg(AMDGPU::SCC); |
4126 | MI.eraseFromParent(); |
4127 | return BB; |
4128 | } |
4129 | case AMDGPU::SI_INIT_M0: { |
4130 | BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), |
4131 | TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
4132 | .add(MI.getOperand(0)); |
4133 | MI.eraseFromParent(); |
4134 | return BB; |
4135 | } |
4136 | case AMDGPU::GET_GROUPSTATICSIZE: { |
4137 | assert(getTargetMachine().getTargetTriple().getOS() == Triple::AMDHSA || |
4138 | getTargetMachine().getTargetTriple().getOS() == Triple::AMDPAL); |
4139 | DebugLoc DL = MI.getDebugLoc(); |
4140 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32)) |
4141 | .add(MI.getOperand(0)) |
4142 | .addImm(MFI->getLDSSize()); |
4143 | MI.eraseFromParent(); |
4144 | return BB; |
4145 | } |
4146 | case AMDGPU::SI_INDIRECT_SRC_V1: |
4147 | case AMDGPU::SI_INDIRECT_SRC_V2: |
4148 | case AMDGPU::SI_INDIRECT_SRC_V4: |
4149 | case AMDGPU::SI_INDIRECT_SRC_V8: |
4150 | case AMDGPU::SI_INDIRECT_SRC_V16: |
4151 | case AMDGPU::SI_INDIRECT_SRC_V32: |
4152 | return emitIndirectSrc(MI, *BB, *getSubtarget()); |
4153 | case AMDGPU::SI_INDIRECT_DST_V1: |
4154 | case AMDGPU::SI_INDIRECT_DST_V2: |
4155 | case AMDGPU::SI_INDIRECT_DST_V4: |
4156 | case AMDGPU::SI_INDIRECT_DST_V8: |
4157 | case AMDGPU::SI_INDIRECT_DST_V16: |
4158 | case AMDGPU::SI_INDIRECT_DST_V32: |
4159 | return emitIndirectDst(MI, *BB, *getSubtarget()); |
4160 | case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: |
4161 | case AMDGPU::SI_KILL_I1_PSEUDO: |
4162 | return splitKillBlock(MI, BB); |
4163 | case AMDGPU::V_CNDMASK_B64_PSEUDO: { |
4164 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
4165 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
4166 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
4167 | |
4168 | Register Dst = MI.getOperand(0).getReg(); |
4169 | Register Src0 = MI.getOperand(1).getReg(); |
4170 | Register Src1 = MI.getOperand(2).getReg(); |
4171 | const DebugLoc &DL = MI.getDebugLoc(); |
4172 | Register SrcCond = MI.getOperand(3).getReg(); |
4173 | |
4174 | Register DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
4175 | Register DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
4176 | const auto *CondRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); |
4177 | Register SrcCondCopy = MRI.createVirtualRegister(CondRC); |
4178 | |
4179 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy) |
4180 | .addReg(SrcCond); |
4181 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo) |
4182 | .addImm(0) |
4183 | .addReg(Src0, 0, AMDGPU::sub0) |
4184 | .addImm(0) |
4185 | .addReg(Src1, 0, AMDGPU::sub0) |
4186 | .addReg(SrcCondCopy); |
4187 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi) |
4188 | .addImm(0) |
4189 | .addReg(Src0, 0, AMDGPU::sub1) |
4190 | .addImm(0) |
4191 | .addReg(Src1, 0, AMDGPU::sub1) |
4192 | .addReg(SrcCondCopy); |
4193 | |
4194 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst) |
4195 | .addReg(DstLo) |
4196 | .addImm(AMDGPU::sub0) |
4197 | .addReg(DstHi) |
4198 | .addImm(AMDGPU::sub1); |
4199 | MI.eraseFromParent(); |
4200 | return BB; |
4201 | } |
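     | // A 64-bit select has no native instruction, so it is split into two |
     | // 32-bit V_CNDMASKs over the sub0/sub1 halves and reassembled with |
     | // REG_SEQUENCE. |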
4202 | case AMDGPU::SI_BR_UNDEF: { |
4203 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
4204 | const DebugLoc &DL = MI.getDebugLoc(); |
4205 | MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) |
4206 | .add(MI.getOperand(0)); |
4207 | Br->getOperand(1).setIsUndef(true); |
4208 | MI.eraseFromParent(); |
4209 | return BB; |
4210 | } |
4211 | case AMDGPU::ADJCALLSTACKUP: |
4212 | case AMDGPU::ADJCALLSTACKDOWN: { |
4213 | const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); |
4214 | MachineInstrBuilder MIB(*MF, &MI); |
4215 | MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) |
4216 | .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit); |
4217 | return BB; |
4218 | } |
4219 | case AMDGPU::SI_CALL_ISEL: { |
4220 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
4221 | const DebugLoc &DL = MI.getDebugLoc(); |
4222 | |
4223 | unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); |
4224 | |
4225 | MachineInstrBuilder MIB; |
4226 | MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); |
4227 | |
4228 | for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) |
4229 | MIB.add(MI.getOperand(I)); |
4230 | |
4231 | MIB.cloneMemRefs(MI); |
4232 | MI.eraseFromParent(); |
4233 | return BB; |
4234 | } |
4235 | case AMDGPU::V_ADD_CO_U32_e32: |
4236 | case AMDGPU::V_SUB_CO_U32_e32: |
4237 | case AMDGPU::V_SUBREV_CO_U32_e32: { |
4238 | // Fall back to the VOP3 encoding when the e32 form is unavailable. |
4239 | const DebugLoc &DL = MI.getDebugLoc(); |
4240 | unsigned Opc = MI.getOpcode(); |
4241 | |
4242 | bool NeedClampOperand = false; |
4243 | if (TII->pseudoToMCOpcode(Opc) == -1) { |
4244 | Opc = AMDGPU::getVOPe64(Opc); |
4245 | NeedClampOperand = true; |
4246 | } |
4247 | |
4248 | auto I = BuildMI(*BB, MI, DL, TII->get(Opc), MI.getOperand(0).getReg()); |
4249 | if (TII->isVOP3(*I)) { |
4250 | const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); |
4251 | const SIRegisterInfo *TRI = ST.getRegisterInfo(); |
4252 | I.addReg(TRI->getVCC(), RegState::Define); |
4253 | } |
4254 | I.add(MI.getOperand(1)) |
4255 | .add(MI.getOperand(2)); |
4256 | if (NeedClampOperand) |
4257 | I.addImm(0); |
4258 | |
4259 | TII->legalizeOperands(*I); |
4260 | |
4261 | MI.eraseFromParent(); |
4262 | return BB; |
4263 | } |
4264 | case AMDGPU::DS_GWS_INIT: |
4265 | case AMDGPU::DS_GWS_SEMA_BR: |
4266 | case AMDGPU::DS_GWS_BARRIER: |
4267 | if (Subtarget->needsAlignedVGPRs()) { |
4268 | // Widen data0 to an aligned 64-bit register pair to satisfy the VGPR alignment requirement. |
4269 | const DebugLoc &DL = MI.getDebugLoc(); |
4270 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
4271 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
4272 | MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::data0); |
4273 | Register DataReg = Op->getReg(); |
4274 | bool IsAGPR = TRI->isAGPR(MRI, DataReg); |
4275 | Register Undef = MRI.createVirtualRegister( |
4276 | IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass); |
4277 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), Undef); |
4278 | Register NewVR = |
4279 | MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass |
4280 | : &AMDGPU::VReg_64_Align2RegClass); |
4281 | BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), NewVR) |
4282 | .addReg(DataReg, 0, Op->getSubReg()) |
4283 | .addImm(AMDGPU::sub0) |
4284 | .addReg(Undef) |
4285 | .addImm(AMDGPU::sub1); |
4286 | Op->setReg(NewVR); |
4287 | Op->setSubReg(AMDGPU::sub0); |
4288 | MI.addOperand(MachineOperand::CreateReg(NewVR, false, true)); |
4289 | } |
4290 | LLVM_FALLTHROUGH; |
4291 | case AMDGPU::DS_GWS_SEMA_V: |
4292 | case AMDGPU::DS_GWS_SEMA_P: |
4293 | case AMDGPU::DS_GWS_SEMA_RELEASE_ALL: |
4294 | // With GWS auto-replay, the hardware retries the operation, so a bundled wait is enough. |
4295 | if (getSubtarget()->hasGWSAutoReplay()) { |
4296 | bundleInstWithWaitcnt(MI); |
4297 | return BB; |
4298 | } |
4299 | |
4300 | return emitGWSMemViolTestLoop(MI, BB); |
4301 | case AMDGPU::S_SETREG_B32: { |
4302 | // Try to optimize cases that only set the denormal mode or rounding mode. |
4303 | // |
4304 | // If the s_setreg_b32 fully sets all of the bits in the rounding mode or |
4305 | // denormal mode to a constant, we can use s_round_mode or s_denorm_mode |
4306 | // instead. |
4307 | // |
4308 | // FIXME: This could be predicates on the immediate, but tablegen doesn't |
4309 | // allow you to have a no-side-effect instruction in the output of a |
4310 | // side-effecting pattern. |
4311 | unsigned ID, Offset, Width; |
4312 | AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width); |
4313 | if (ID != AMDGPU::Hwreg::ID_MODE) |
4314 | return BB; |
4315 | |
4316 | const unsigned WidthMask = maskTrailingOnes<unsigned>(Width); |
4317 | const unsigned SetMask = WidthMask << Offset; |
4318 | |
4319 | if (getSubtarget()->hasDenormModeInst()) { |
4320 | unsigned SetDenormOp = 0; |
4321 | unsigned SetRoundOp = 0; |
4322 | |
4323 | // The dedicated instructions can only set the whole denorm or round mode |
4324 | // at once, not a subset of bits in either. |
4325 | if (SetMask == |
4326 | (AMDGPU::Hwreg::FP_ROUND_MASK | AMDGPU::Hwreg::FP_DENORM_MASK)) { |
4327 | // If this fully sets both the round and denorm mode, emit the two |
4328 | // dedicated instructions for those. |
4329 | SetRoundOp = AMDGPU::S_ROUND_MODE; |
4330 | SetDenormOp = AMDGPU::S_DENORM_MODE; |
4331 | } else if (SetMask == AMDGPU::Hwreg::FP_ROUND_MASK) { |
4332 | SetRoundOp = AMDGPU::S_ROUND_MODE; |
4333 | } else if (SetMask == AMDGPU::Hwreg::FP_DENORM_MASK) { |
4334 | SetDenormOp = AMDGPU::S_DENORM_MODE; |
4335 | } |
4336 | |
4337 | if (SetRoundOp || SetDenormOp) { |
4338 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); |
4339 | MachineInstr *Def = MRI.getVRegDef(MI.getOperand(0).getReg()); |
4340 | if (Def && Def->isMoveImmediate() && Def->getOperand(1).isImm()) { |
4341 | unsigned ImmVal = Def->getOperand(1).getImm(); |
4342 | if (SetRoundOp) { |
4343 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetRoundOp)) |
4344 | .addImm(ImmVal & 0xf); |
4345 | |
4346 | // If we also have the denorm mode, get just the denorm mode bits. |
4347 | ImmVal >>= 4; |
4348 | } |
4349 | |
4350 | if (SetDenormOp) { |
4351 | BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SetDenormOp)) |
4352 | .addImm(ImmVal & 0xf); |
4353 | } |
4354 | |
4355 | MI.eraseFromParent(); |
4356 | return BB; |
4357 | } |
4358 | } |
4359 | } |
4360 | |
4361 | // If only FP mode bits are touched, use the no-side-effects pseudo. |
4362 | if ((SetMask & (AMDGPU::Hwreg::FP_ROUND_MASK | |
4363 | AMDGPU::Hwreg::FP_DENORM_MASK)) == SetMask) |
4364 | MI.setDesc(TII->get(AMDGPU::S_SETREG_B32_mode)); |
4365 | |
4366 | return BB; |
4367 | } |
4368 | default: |
4369 | return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); |
4370 | } |
4371 | } |
4372 | |
4373 | bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { |
4374 | return isTypeLegal(VT.getScalarType()); |
4375 | } |
4376 | |
4377 | bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { |
4378 | |
4379 | // This hook reports whether aggressively forming FMA (even when an operand |
4380 | // has other uses) is profitable; on these subtargets it always is. |
4381 | |
4382 | |
4383 | |
4384 | |
4385 | return true; |
4386 | } |
4387 | |
4388 | EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, |
4389 | EVT VT) const { |
4390 | if (!VT.isVector()) { |
4391 | return MVT::i1; |
4392 | } |
4393 | return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); |
4394 | } |
4395 | |
4396 | MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { |
4397 | |
4398 | // Shift amounts are i16 only for i16 shifts; wider shifts use i32 amounts. |
4399 | return (VT == MVT::i16) ? MVT::i16 : MVT::i32; |
4400 | } |
4401 | |
4402 | LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const { |
4403 | return (Ty.getScalarSizeInBits() <= 16 && Subtarget->has16BitInsts()) |
4404 | ? Ty.changeElementSize(16) |
4405 | : Ty.changeElementSize(32); |
4406 | } |
4407 | |
4408 | // Answering this is somewhat tricky and depends on the specific device, |
4409 | // since different devices have different rates for fma and f64 operations. |
4410 | // |
4411 | // v_fma_f64 and v_mul_f64 always take the same number of cycles as each |
4412 | // other on a given device, so fusing is always profitable for f64. |
4413 | // |
4414 | // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is only |
4415 | // profitable on full-rate devices; elsewhere the choice also depends on |
4416 | // whether denormals are enabled, as handled case by case below. |
4417 | |
4418 | |
4419 | |
4420 | |
4421 | |
4422 | |
4423 | bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, |
4424 | EVT VT) const { |
4425 | VT = VT.getScalarType(); |
4426 | |
4427 | switch (VT.getSimpleVT().SimpleTy) { |
4428 | case MVT::f32: { |
4429 | // If mad is not available, this depends only on whether f32 fma is full rate. |
4430 | if (!Subtarget->hasMadMacF32Insts()) |
4431 | return Subtarget->hasFastFMAF32(); |
4432 | |
4433 | // Otherwise f32 mad is always full rate and returns the same result as |
4434 | // the separate operations, so it should be preferred over fma. However, |
4435 | // it does not support denormals. |
4436 | if (hasFP32Denormals(MF)) |
4437 | return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); |
4438 | |
4439 | // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. |
4440 | return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); |
4441 | } |
4442 | case MVT::f64: |
4443 | return true; |
4444 | case MVT::f16: |
4445 | return Subtarget->has16BitInsts() && hasFP64FP16Denormals(MF); |
4446 | default: |
4447 | break; |
4448 | } |
4449 | |
4450 | return false; |
4451 | } |
4452 | |
4453 | bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG, |
4454 | const SDNode *N) const { |
4455 | // mad/mac instructions do not support denormals, so FMAD is only legal |
4456 | // when the corresponding denormal mode is disabled. |
4457 | EVT VT = N->getValueType(0); |
4458 | if (VT == MVT::f32) |
4459 | return Subtarget->hasMadMacF32Insts() && |
4460 | !hasFP32Denormals(DAG.getMachineFunction()); |
4461 | if (VT == MVT::f16) { |
4462 | return Subtarget->hasMadF16() && |
4463 | !hasFP64FP16Denormals(DAG.getMachineFunction()); |
4464 | } |
4465 | |
4466 | return false; |
4467 | } |
4468 | |
4469 | |
4470 | //===----------------------------------------------------------------------===// |
4471 | // Custom DAG Lowering Operations |
4472 | //===----------------------------------------------------------------------===// |
4473 | |
4474 | // Split a vector op with an illegal 4-element type into two ops on the halves. |
4475 | SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, |
4476 | SelectionDAG &DAG) const { |
4477 | unsigned Opc = Op.getOpcode(); |
4478 | EVT VT = Op.getValueType(); |
4479 | assert(VT == MVT::v4f16 || VT == MVT::v4i16); |
4480 | |
4481 | SDValue Lo, Hi; |
4482 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); |
4483 | |
4484 | SDLoc SL(Op); |
4485 | SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, |
4486 | Op->getFlags()); |
4487 | SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, |
4488 | Op->getFlags()); |
4489 | |
4490 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
4491 | } |
4492 | |
4493 | |
4494 | |
4495 | SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op, |
4496 | SelectionDAG &DAG) const { |
4497 | unsigned Opc = Op.getOpcode(); |
4498 | EVT VT = Op.getValueType(); |
4499 | assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 || |
4500 | VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32); |
4501 | |
4502 | SDValue Lo0, Hi0; |
4503 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
4504 | SDValue Lo1, Hi1; |
4505 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
4506 | |
4507 | SDLoc SL(Op); |
4508 | |
4509 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, |
4510 | Op->getFlags()); |
4511 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, |
4512 | Op->getFlags()); |
4513 | |
4514 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
4515 | } |
4516 | |
4517 | SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op, |
4518 | SelectionDAG &DAG) const { |
4519 | unsigned Opc = Op.getOpcode(); |
4520 | EVT VT = Op.getValueType(); |
4521 | assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4f32 || |
4522 | VT == MVT::v8f32 || VT == MVT::v16f32 || VT == MVT::v32f32); |
4523 | |
4524 | SDValue Lo0, Hi0; |
4525 | std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0); |
4526 | SDValue Lo1, Hi1; |
4527 | std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1); |
4528 | SDValue Lo2, Hi2; |
4529 | std::tie(Lo2, Hi2) = DAG.SplitVectorOperand(Op.getNode(), 2); |
4530 | |
4531 | SDLoc SL(Op); |
4532 | |
4533 | SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1, Lo2, |
4534 | Op->getFlags()); |
4535 | SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1, Hi2, |
4536 | Op->getFlags()); |
4537 | |
4538 | return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); |
4539 | } |
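     | // Illustrative sketch (annotation, not in the original source): with |
     | // v2f16 as the legal packed type, a v4f16 FMA is split by the routine |
     | // above roughly as: |
     | //   fma v4f16 (a, b, c) |
     | //     -> Lo = fma v2f16 (a.lo, b.lo, c.lo) |
     | //        Hi = fma v2f16 (a.hi, b.hi, c.hi) |
     | //     -> concat_vectors (Lo, Hi) : v4f16 |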
4540 | |
4541 | |
4542 | SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { |
4543 | switch (Op.getOpcode()) { |
4544 | default: return AMDGPUTargetLowering::LowerOperation(Op, DAG); |
4545 | case ISD::BRCOND: return LowerBRCOND(Op, DAG); |
4546 | case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); |
4547 | case ISD::LOAD: { |
4548 | SDValue Result = LowerLOAD(Op, DAG); |
4549 | assert((!Result.getNode() || |
4550 | Result.getNode()->getNumValues() == 2) && |
4551 | "Load should return a value and a chain"); |
4552 | return Result; |
4553 | } |
4554 | |
4555 | case ISD::FSIN: |
4556 | case ISD::FCOS: |
4557 | return LowerTrig(Op, DAG); |
4558 | case ISD::SELECT: return LowerSELECT(Op, DAG); |
4559 | case ISD::FDIV: return LowerFDIV(Op, DAG); |
4560 | case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG); |
4561 | case ISD::STORE: return LowerSTORE(Op, DAG); |
4562 | case ISD::GlobalAddress: { |
4563 | MachineFunction &MF = DAG.getMachineFunction(); |
4564 | SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
4565 | return LowerGlobalAddress(MFI, Op, DAG); |
4566 | } |
4567 | case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); |
4568 | case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); |
4569 | case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); |
4570 | case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); |
4571 | case ISD::INSERT_SUBVECTOR: |
4572 | return lowerINSERT_SUBVECTOR(Op, DAG); |
4573 | case ISD::INSERT_VECTOR_ELT: |
4574 | return lowerINSERT_VECTOR_ELT(Op, DAG); |
4575 | case ISD::EXTRACT_VECTOR_ELT: |
4576 | return lowerEXTRACT_VECTOR_ELT(Op, DAG); |
4577 | case ISD::VECTOR_SHUFFLE: |
4578 | return lowerVECTOR_SHUFFLE(Op, DAG); |
4579 | case ISD::BUILD_VECTOR: |
4580 | return lowerBUILD_VECTOR(Op, DAG); |
4581 | case ISD::FP_ROUND: |
4582 | return lowerFP_ROUND(Op, DAG); |
4583 | case ISD::TRAP: |
4584 | return lowerTRAP(Op, DAG); |
4585 | case ISD::DEBUGTRAP: |
4586 | return lowerDEBUGTRAP(Op, DAG); |
4587 | case ISD::FABS: |
4588 | case ISD::FNEG: |
4589 | case ISD::FCANONICALIZE: |
4590 | case ISD::BSWAP: |
4591 | return splitUnaryVectorOp(Op, DAG); |
4592 | case ISD::FMINNUM: |
4593 | case ISD::FMAXNUM: |
4594 | return lowerFMINNUM_FMAXNUM(Op, DAG); |
4595 | case ISD::FMA: |
4596 | return splitTernaryVectorOp(Op, DAG); |
4597 | case ISD::FP_TO_SINT: |
4598 | case ISD::FP_TO_UINT: |
4599 | return LowerFP_TO_INT(Op, DAG); |
4600 | case ISD::SHL: |
4601 | case ISD::SRA: |
4602 | case ISD::SRL: |
4603 | case ISD::ADD: |
4604 | case ISD::SUB: |
4605 | case ISD::MUL: |
4606 | case ISD::SMIN: |
4607 | case ISD::SMAX: |
4608 | case ISD::UMIN: |
4609 | case ISD::UMAX: |
4610 | case ISD::FADD: |
4611 | case ISD::FMUL: |
4612 | case ISD::FMINNUM_IEEE: |
4613 | case ISD::FMAXNUM_IEEE: |
4614 | case ISD::UADDSAT: |
4615 | case ISD::USUBSAT: |
4616 | case ISD::SADDSAT: |
4617 | case ISD::SSUBSAT: |
4618 | return splitBinaryVectorOp(Op, DAG); |
4619 | case ISD::SMULO: |
4620 | case ISD::UMULO: |
4621 | return lowerXMULO(Op, DAG); |
4622 | case ISD::DYNAMIC_STACKALLOC: |
4623 | return LowerDYNAMIC_STACKALLOC(Op, DAG); |
4624 | } |
4625 | return SDValue(); |
4626 | } |
4627 | |
4628 | // Used for D16: casts the result of an instruction into the right vector, |
4629 | // packing values if loads return unpacked values. |
4630 | static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT, |
4631 | const SDLoc &DL, |
4632 | SelectionDAG &DAG, bool Unpacked) { |
4633 | if (!LoadVT.isVector()) |
4634 | return Result; |
4635 | |
4636 | // If the original element count is odd, widen to the next even count; |
4637 | // the extra element is undef and callers only read the original |
4638 | // elements after the cast below. |
4639 | EVT FittingLoadVT = LoadVT; |
4640 | if ((LoadVT.getVectorNumElements() % 2) == 1) { |
4641 | FittingLoadVT = |
4642 | EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(), |
4643 | LoadVT.getVectorNumElements() + 1); |
4644 | } |
4645 | |
4646 | if (Unpacked) { |
4647 | // From v2i32/v4i32 back to v2f16/v4f16. |
4648 | EVT IntLoadVT = FittingLoadVT.changeTypeToInteger(); |
4649 | |
4650 | // Truncate each dword back to i16; an undef element is appended below |
4651 | // when the original element count was odd. |
4652 | SmallVector<SDValue, 4> Elts; |
4653 | DAG.ExtractVectorElements(Result, Elts); |
4654 | for (SDValue &Elt : Elts) |
4655 | Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); |
4656 | |
4657 | |
4658 | if ((LoadVT.getVectorNumElements() % 2) == 1) |
4659 | Elts.push_back(DAG.getUNDEF(MVT::i16)); |
4660 | |
4661 | Result = DAG.getBuildVector(IntLoadVT, DL, Elts); |
4662 | |
4663 | |
4664 | return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result); |
4665 | } |
4666 | |
4667 | |
4668 | return DAG.getNode(ISD::BITCAST, DL, FittingLoadVT, Result); |
4669 | } |
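     | // Worked example (annotation): a D16 load returning v3f16 on an |
     | // unpacked-D16 target arrives as v3i32, one value per dword. The code |
     | // above truncates each dword to i16, appends one undef i16 to reach |
     | // the even-sized v4i16, and bitcasts to v4f16; callers extract only |
     | // the original three elements. |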
4670 | |
4671 | SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, |
4672 | MemSDNode *M, |
4673 | SelectionDAG &DAG, |
4674 | ArrayRef<SDValue> Ops, |
4675 | bool IsIntrinsic) const { |
4676 | SDLoc DL(M); |
4677 | |
4678 | bool Unpacked = Subtarget->hasUnpackedD16VMem(); |
4679 | EVT LoadVT = M->getValueType(0); |
4680 | |
4681 | EVT EquivLoadVT = LoadVT; |
4682 | if (LoadVT.isVector()) { |
4683 | if (Unpacked) { |
4684 | EquivLoadVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, |
4685 | LoadVT.getVectorNumElements()); |
4686 | } else if ((LoadVT.getVectorNumElements() % 2) == 1) { |
4687 | // Widen odd-sized vectors (e.g. v3f16) to the next even, legal size. |
4688 | EquivLoadVT = |
4689 | EVT::getVectorVT(*DAG.getContext(), LoadVT.getVectorElementType(), |
4690 | LoadVT.getVectorNumElements() + 1); |
4691 | } |
4692 | } |
4693 | |
4694 | // Change from the requested type to the equivalent legal load type. |
4695 | SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); |
4696 | |
4697 | SDValue Load |
4698 | = DAG.getMemIntrinsicNode( |
4699 | IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, |
4700 | VTList, Ops, M->getMemoryVT(), |
4701 | M->getMemOperand()); |
4702 | |
4703 | SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); |
4704 | |
4705 | return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); |
4706 | } |
4707 | |
4708 | SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, |
4709 | SelectionDAG &DAG, |
4710 | ArrayRef<SDValue> Ops) const { |
4711 | SDLoc DL(M); |
4712 | EVT LoadVT = M->getValueType(0); |
4713 | EVT EltType = LoadVT.getScalarType(); |
4714 | EVT IntVT = LoadVT.changeTypeToInteger(); |
4715 | |
4716 | bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16); |
4717 | |
4718 | unsigned Opc = |
4719 | IsFormat ? AMDGPUISD::BUFFER_LOAD_FORMAT : AMDGPUISD::BUFFER_LOAD; |
4720 | |
4721 | if (IsD16) { |
4722 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG, Ops); |
4723 | } |
4724 | |
4725 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics. |
4726 | if (!IsD16 && !LoadVT.isVector() && EltType.getSizeInBits() < 32) |
4727 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
4728 | |
4729 | if (isTypeLegal(LoadVT)) { |
4730 | return getMemIntrinsicNode(Opc, DL, M->getVTList(), Ops, IntVT, |
4731 | M->getMemOperand(), DAG); |
4732 | } |
4733 | |
4734 | EVT CastVT = getEquivalentMemType(*DAG.getContext(), LoadVT); |
4735 | SDVTList VTList = DAG.getVTList(CastVT, MVT::Other); |
4736 | SDValue MemNode = getMemIntrinsicNode(Opc, DL, VTList, Ops, CastVT, |
4737 | M->getMemOperand(), DAG); |
4738 | return DAG.getMergeValues( |
4739 | {DAG.getNode(ISD::BITCAST, DL, LoadVT, MemNode), MemNode.getValue(1)}, |
4740 | DL); |
4741 | } |
4742 | |
4743 | static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI, |
4744 | SDNode *N, SelectionDAG &DAG) { |
4745 | EVT VT = N->getValueType(0); |
4746 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
4747 | unsigned CondCode = CD->getZExtValue(); |
4748 | if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(CondCode))) |
4749 | return DAG.getUNDEF(VT); |
4750 | |
4751 | ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); |
4752 | |
4753 | SDValue LHS = N->getOperand(1); |
4754 | SDValue RHS = N->getOperand(2); |
4755 | |
4756 | SDLoc DL(N); |
4757 | |
4758 | EVT CmpVT = LHS.getValueType(); |
4759 | if (CmpVT == MVT::i16 && !TLI.isTypeLegal(MVT::i16)) { |
4760 | unsigned PromoteOp = ICmpInst::isSigned(IcInput) ? |
4761 | ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
4762 | LHS = DAG.getNode(PromoteOp, DL, MVT::i32, LHS); |
4763 | RHS = DAG.getNode(PromoteOp, DL, MVT::i32, RHS); |
4764 | } |
4765 | |
4766 | ISD::CondCode CCOpcode = getICmpCondCode(IcInput); |
4767 | |
4768 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
4769 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
4770 | |
4771 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, DL, CCVT, LHS, RHS, |
4772 | DAG.getCondCode(CCOpcode)); |
4773 | if (VT.bitsEq(CCVT)) |
4774 | return SetCC; |
4775 | return DAG.getZExtOrTrunc(SetCC, DL, VT); |
4776 | } |
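     | // Example (annotation): on a wave64 target, |
     | //   i64 llvm.amdgcn.icmp.i32(a, b, 32 /* ICMP_EQ */) |
     | // becomes an AMDGPUISD::SETCC producing an i64 lane mask (one bit per |
     | // lane). Illegal i16 operands are first promoted to i32, sign- or |
     | // zero-extending to match the predicate's signedness. |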
4777 | |
4778 | static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI, |
4779 | SDNode *N, SelectionDAG &DAG) { |
4780 | EVT VT = N->getValueType(0); |
4781 | const auto *CD = cast<ConstantSDNode>(N->getOperand(3)); |
4782 | |
4783 | unsigned CondCode = CD->getZExtValue(); |
4784 | if (!FCmpInst::isFPPredicate(static_cast<FCmpInst::Predicate>(CondCode))) |
4785 | return DAG.getUNDEF(VT); |
4786 | |
4787 | SDValue Src0 = N->getOperand(1); |
4788 | SDValue Src1 = N->getOperand(2); |
4789 | EVT CmpVT = Src0.getValueType(); |
4790 | SDLoc SL(N); |
4791 | |
4792 | if (CmpVT == MVT::f16 && !TLI.isTypeLegal(CmpVT)) { |
4793 | Src0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); |
4794 | Src1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); |
4795 | } |
4796 | |
4797 | FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); |
4798 | ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); |
4799 | unsigned WavefrontSize = TLI.getSubtarget()->getWavefrontSize(); |
4800 | EVT CCVT = EVT::getIntegerVT(*DAG.getContext(), WavefrontSize); |
4801 | SDValue SetCC = DAG.getNode(AMDGPUISD::SETCC, SL, CCVT, Src0, |
4802 | Src1, DAG.getCondCode(CCOpcode)); |
4803 | if (VT.bitsEq(CCVT)) |
4804 | return SetCC; |
4805 | return DAG.getZExtOrTrunc(SetCC, SL, VT); |
4806 | } |
4807 | |
4808 | static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N, |
4809 | SelectionDAG &DAG) { |
4810 | EVT VT = N->getValueType(0); |
4811 | SDValue Src = N->getOperand(1); |
4812 | SDLoc SL(N); |
4813 | |
4814 | if (Src.getOpcode() == ISD::SETCC) { |
4815 | |
4816 | return DAG.getNode(AMDGPUISD::SETCC, SL, VT, Src.getOperand(0), |
4817 | Src.getOperand(1), Src.getOperand(2)); |
4818 | } |
4819 | if (const ConstantSDNode *Arg = dyn_cast<ConstantSDNode>(Src)) { |
4820 | |
4821 | if (Arg->isNullValue()) |
4822 | return DAG.getConstant(0, SL, VT); |
4823 | |
4824 | |
4825 | if (Arg->isOne()) { |
4826 | Register Exec; |
4827 | if (VT.getScalarSizeInBits() == 32) |
4828 | Exec = AMDGPU::EXEC_LO; |
4829 | else if (VT.getScalarSizeInBits() == 64) |
4830 | Exec = AMDGPU::EXEC; |
4831 | else |
4832 | return SDValue(); |
4833 | |
4834 | return DAG.getCopyFromReg(DAG.getEntryNode(), SL, Exec, VT); |
4835 | } |
4836 | } |
4837 | |
4838 | |
4839 | |
4840 | return DAG.getNode( |
4841 | AMDGPUISD::SETCC, SL, VT, DAG.getZExtOrTrunc(Src, SL, MVT::i32), |
4842 | DAG.getConstant(0, SL, MVT::i32), DAG.getCondCode(ISD::SETNE)); |
4843 | } |
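     | // Example (annotation): llvm.amdgcn.ballot with a constant-true source |
     | // folds to a copy from EXEC (EXEC_LO for a 32-bit result), a constant- |
     | // false source folds to 0, and any other i1 source is evaluated as the |
     | // per-lane comparison (zext(src) != 0). |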
4844 | |
4845 | void SITargetLowering::ReplaceNodeResults(SDNode *N, |
4846 | SmallVectorImpl<SDValue> &Results, |
4847 | SelectionDAG &DAG) const { |
4848 | switch (N->getOpcode()) { |
4849 | case ISD::INSERT_VECTOR_ELT: { |
4850 | if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) |
4851 | Results.push_back(Res); |
4852 | return; |
4853 | } |
4854 | case ISD::EXTRACT_VECTOR_ELT: { |
4855 | if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) |
4856 | Results.push_back(Res); |
4857 | return; |
4858 | } |
4859 | case ISD::INTRINSIC_WO_CHAIN: { |
4860 | unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); |
4861 | switch (IID) { |
4862 | case Intrinsic::amdgcn_cvt_pkrtz: { |
4863 | SDValue Src0 = N->getOperand(1); |
4864 | SDValue Src1 = N->getOperand(2); |
4865 | SDLoc SL(N); |
4866 | SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, |
4867 | Src0, Src1); |
4868 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); |
4869 | return; |
4870 | } |
4871 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
4872 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
4873 | case Intrinsic::amdgcn_cvt_pk_i16: |
4874 | case Intrinsic::amdgcn_cvt_pk_u16: { |
4875 | SDValue Src0 = N->getOperand(1); |
4876 | SDValue Src1 = N->getOperand(2); |
4877 | SDLoc SL(N); |
4878 | unsigned Opcode; |
4879 | |
4880 | if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) |
4881 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
4882 | else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) |
4883 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
4884 | else if (IID == Intrinsic::amdgcn_cvt_pk_i16) |
4885 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
4886 | else |
4887 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
4888 | |
4889 | EVT VT = N->getValueType(0); |
4890 | if (isTypeLegal(VT)) |
4891 | Results.push_back(DAG.getNode(Opcode, SL, VT, Src0, Src1)); |
4892 | else { |
4893 | SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); |
4894 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); |
4895 | } |
4896 | return; |
4897 | } |
4898 | } |
4899 | break; |
4900 | } |
4901 | case ISD::INTRINSIC_W_CHAIN: { |
4902 | if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { |
4903 | if (Res.getOpcode() == ISD::MERGE_VALUES) { |
4904 | |
4905 | for (unsigned I = 0; I < Res.getNumOperands(); I++) { |
4906 | Results.push_back(Res.getOperand(I)); |
4907 | } |
4908 | } else { |
4909 | Results.push_back(Res); |
4910 | Results.push_back(Res.getValue(1)); |
4911 | } |
4912 | return; |
4913 | } |
4914 | |
4915 | break; |
4916 | } |
4917 | case ISD::SELECT: { |
4918 | SDLoc SL(N); |
4919 | EVT VT = N->getValueType(0); |
4920 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); |
4921 | SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); |
4922 | SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); |
4923 | |
4924 | EVT SelectVT = NewVT; |
4925 | if (NewVT.bitsLT(MVT::i32)) { |
4926 | LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); |
4927 | RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); |
4928 | SelectVT = MVT::i32; |
4929 | } |
4930 | |
4931 | SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, |
4932 | N->getOperand(0), LHS, RHS); |
4933 | |
4934 | if (NewVT != SelectVT) |
4935 | NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); |
4936 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); |
4937 | return; |
4938 | } |
4939 | case ISD::FNEG: { |
4940 | if (N->getValueType(0) != MVT::v2f16) |
4941 | break; |
4942 | |
4943 | SDLoc SL(N); |
4944 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
4945 | |
4946 | SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, |
4947 | BC, |
4948 | DAG.getConstant(0x80008000, SL, MVT::i32)); |
4949 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
4950 | return; |
4951 | } |
4952 | case ISD::FABS: { |
4953 | if (N->getValueType(0) != MVT::v2f16) |
4954 | break; |
4955 | |
4956 | SDLoc SL(N); |
4957 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); |
4958 | |
4959 | SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, |
4960 | BC, |
4961 | DAG.getConstant(0x7fff7fff, SL, MVT::i32)); |
4962 | Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); |
4963 | return; |
4964 | } |
4965 | default: |
4966 | break; |
4967 | } |
4968 | } |
4969 | |
4970 | |
4971 | static SDNode *findUser(SDValue Value, unsigned Opcode) { |
4972 | |
4973 | SDNode *Parent = Value.getNode(); |
4974 | for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); |
4975 | I != E; ++I) { |
4976 | |
4977 | if (I.getUse().get() != Value) |
4978 | continue; |
4979 | |
4980 | if (I->getOpcode() == Opcode) |
4981 | return *I; |
4982 | } |
4983 | return nullptr; |
4984 | } |
4985 | |
4986 | unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { |
4987 | if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { |
4988 | switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { |
4989 | case Intrinsic::amdgcn_if: |
4990 | return AMDGPUISD::IF; |
4991 | case Intrinsic::amdgcn_else: |
4992 | return AMDGPUISD::ELSE; |
4993 | case Intrinsic::amdgcn_loop: |
4994 | return AMDGPUISD::LOOP; |
4995 | case Intrinsic::amdgcn_end_cf: |
4996 | llvm_unreachable("should not occur"); |
4997 | default: |
4998 | return 0; |
4999 | } |
5000 | } |
5001 | |
5002 | |
5003 | |
5004 | return 0; |
5005 | } |
5006 | |
5007 | bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { |
5008 | const Triple &TT = getTargetMachine().getTargetTriple(); |
5009 | return (GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
5010 | GV->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
5011 | AMDGPU::shouldEmitConstantsToTextSection(TT); |
5012 | } |
5013 | |
5014 | bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { |
5015 | // FIXME: Either avoid relying on the address space here or change the |
5016 | // default address space for functions to avoid the explicit check. |
5017 | return (GV->getValueType()->isFunctionTy() || |
5018 | !isNonGlobalAddrSpace(GV->getAddressSpace())) && |
5019 | !shouldEmitFixup(GV) && |
5020 | !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); |
5021 | } |
5022 | |
5023 | bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { |
5024 | return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV); |
5025 | } |
5026 | |
5027 | bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const { |
5028 | if (!GV->hasExternalLinkage()) |
5029 | return true; |
5030 | |
5031 | const auto OS = getTargetMachine().getTargetTriple().getOS(); |
5032 | return OS == Triple::AMDHSA || OS == Triple::AMDPAL; |
5033 | } |
5034 | |
5035 | /// Transforms the control-flow intrinsics to get the branch destination as |
5036 | /// the last parameter, and switches the branch target with BR if needed. |
5037 | SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND, |
5038 | SelectionDAG &DAG) const { |
5039 | SDLoc DL(BRCOND); |
5040 | |
5041 | SDNode *Intr = BRCOND.getOperand(1).getNode(); |
5042 | SDValue Target = BRCOND.getOperand(2); |
5043 | SDNode *BR = nullptr; |
5044 | SDNode *SetCC = nullptr; |
5045 | |
5046 | if (Intr->getOpcode() == ISD::SETCC) { |
5047 | // As long as we negate the condition everything is fine. |
5048 | SetCC = Intr; |
5049 | Intr = SetCC->getOperand(0).getNode(); |
5050 | |
5051 | } else { |
5052 | // Get the target from BR if we don't negate the condition. |
5053 | BR = findUser(BRCOND, ISD::BR); |
5054 | assert(BR && "brcond missing unconditional branch user"); |
5055 | Target = BR->getOperand(1); |
5056 | } |
5057 | |
5058 | unsigned CFNode = isCFIntrinsic(Intr); |
5059 | if (CFNode == 0) { |
5060 | // This is a uniform branch so we don't need to legalize. |
5061 | return BRCOND; |
5062 | } |
5063 | |
5064 | bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || |
5065 | Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; |
5066 | |
5067 | assert(!SetCC || |
5068 | (SetCC->getConstantOperandVal(1) == 1 && |
5069 | cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == |
5070 | ISD::SETNE)); |
5071 | |
5072 | |
5073 | SmallVector<SDValue, 4> Ops; |
5074 | if (HaveChain) |
5075 | Ops.push_back(BRCOND.getOperand(0)); |
5076 | |
5077 | Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); |
5078 | Ops.push_back(Target); |
5079 | |
5080 | ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); |
5081 | |
5082 | |
5083 | SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); |
5084 | |
5085 | if (!HaveChain) { |
5086 | SDValue Ops[] = { |
5087 | SDValue(Result, 0), |
5088 | BRCOND.getOperand(0) |
5089 | }; |
5090 | |
5091 | Result = DAG.getMergeValues(Ops, DL).getNode(); |
5092 | } |
5093 | |
5094 | if (BR) { |
5095 | |
5096 | SDValue Ops[] = { |
5097 | BR->getOperand(0), |
5098 | BRCOND.getOperand(2) |
5099 | }; |
5100 | SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); |
5101 | DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); |
5102 | } |
5103 | |
5104 | SDValue Chain = SDValue(Result, Result->getNumValues() - 1); |
5105 | |
5106 | // Copy the intrinsic results to registers. |
5107 | for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { |
5108 | SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); |
5109 | if (!CopyToReg) |
5110 | continue; |
5111 | |
5112 | Chain = DAG.getCopyToReg( |
5113 | Chain, DL, |
5114 | CopyToReg->getOperand(1), |
5115 | SDValue(Result, i - 1), |
5116 | SDValue()); |
5117 | |
5118 | DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); |
5119 | } |
5120 | |
5121 | // Remove the old intrinsic from the chain. |
5122 | DAG.ReplaceAllUsesOfValueWith( |
5123 | SDValue(Intr, Intr->getNumValues() - 1), |
5124 | Intr->getOperand(0)); |
5125 | |
5126 | return Chain; |
5127 | } |
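     | // Illustrative flow (annotation): brcond (setcc (llvm.amdgcn.if ...), |
     | // 1, ne), BB is rewritten into an AMDGPUISD::IF node that carries the |
     | // branch target as its last operand; uses of the old intrinsic's |
     | // results are rewired onto the new node and the intrinsic is removed |
     | // from the chain. |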
5128 | |
5129 | SDValue SITargetLowering::LowerRETURNADDR(SDValue Op, |
5130 | SelectionDAG &DAG) const { |
5131 | MVT VT = Op.getSimpleValueType(); |
5132 | SDLoc DL(Op); |
5133 | |
5134 | if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) |
5135 | return DAG.getConstant(0, DL, VT); |
5136 | |
5137 | MachineFunction &MF = DAG.getMachineFunction(); |
5138 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
5139 | |
5140 | if (Info->isEntryFunction()) |
5141 | return DAG.getConstant(0, DL, VT); |
5142 | |
5143 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
5144 | |
5145 | MFI.setReturnAddressIsTaken(true); |
5146 | |
5147 | const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); |
5148 | |
5149 | Register Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT, Op.getNode()->isDivergent())); |
5150 | |
5151 | return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT); |
5152 | } |
5153 | |
5154 | SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG, |
5155 | SDValue Op, |
5156 | const SDLoc &DL, |
5157 | EVT VT) const { |
5158 | return Op.getValueType().bitsLE(VT) ? |
5159 | DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : |
5160 | DAG.getNode(ISD::FP_ROUND, DL, VT, Op, |
5161 | DAG.getTargetConstant(0, DL, MVT::i32)); |
5162 | } |
5163 | |
5164 | SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { |
5165 | assert(Op.getValueType() == MVT::f16 && |
5166 | "Do not know how to custom lower FP_ROUND for non-f16 type"); |
5167 | |
5168 | SDValue Src = Op.getOperand(0); |
5169 | EVT SrcVT = Src.getValueType(); |
5170 | if (SrcVT != MVT::f64) |
5171 | return Op; |
5172 | |
5173 | SDLoc DL(Op); |
5174 | |
5175 | SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); |
5176 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); |
5177 | return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); |
5178 | } |
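     | // Note (annotation): only the f64 source needs custom handling; it is |
     | // rounded via FP_TO_FP16 (raw fp16 bits in an i32), truncated to i16, |
     | // and bitcast to f16. Other sources are returned unchanged and matched |
     | // by the existing FP_ROUND patterns. |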
5179 | |
5180 | SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op, |
5181 | SelectionDAG &DAG) const { |
5182 | EVT VT = Op.getValueType(); |
5183 | const MachineFunction &MF = DAG.getMachineFunction(); |
5184 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
5185 | bool IsIEEEMode = Info->getMode().IEEE; |
5186 | |
5187 | // In IEEE mode fminnum/fmaxnum must quiet signaling NaNs, which the raw |
5188 | // min/max instructions do not do, so expand to a canonicalizing |
5189 | // sequence. Outside IEEE mode the instruction semantics already match, |
5190 | // so only the illegal v4f16 case needs splitting. |
5191 | if (IsIEEEMode) |
5192 | return expandFMINNUM_FMAXNUM(Op.getNode(), DAG); |
5193 | |
5194 | if (VT == MVT::v4f16) |
5195 | return splitBinaryVectorOp(Op, DAG); |
5196 | return Op; |
5197 | } |
5198 | |
5199 | SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const { |
5200 | EVT VT = Op.getValueType(); |
5201 | SDLoc SL(Op); |
5202 | SDValue LHS = Op.getOperand(0); |
5203 | SDValue RHS = Op.getOperand(1); |
5204 | bool isSigned = Op.getOpcode() == ISD::SMULO; |
5205 | |
5206 | if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { |
5207 | const APInt &C = RHSC->getAPIntValue(); |
5208 | |
5209 | if (C.isPowerOf2()) { |
5210 | // A power-of-two multiply is a shift: overflow iff shifting back differs. |
5211 | bool UseArithShift = isSigned && !C.isMinSignedValue(); |
5212 | SDValue ShiftAmt = DAG.getConstant(C.logBase2(), SL, MVT::i32); |
5213 | SDValue Result = DAG.getNode(ISD::SHL, SL, VT, LHS, ShiftAmt); |
5214 | SDValue Overflow = DAG.getSetCC(SL, MVT::i1, |
5215 | DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL, |
5216 | SL, VT, Result, ShiftAmt), |
5217 | LHS, ISD::SETNE); |
5218 | return DAG.getMergeValues({ Result, Overflow }, SL); |
5219 | } |
5220 | } |
5221 | |
5222 | SDValue Result = DAG.getNode(ISD::MUL, SL, VT, LHS, RHS); |
5223 | SDValue Top = DAG.getNode(isSigned ? ISD::MULHS : ISD::MULHU, |
5224 | SL, VT, LHS, RHS); |
5225 | |
5226 | SDValue Sign = isSigned |
5227 | ? DAG.getNode(ISD::SRA, SL, VT, Result, |
5228 | DAG.getConstant(VT.getScalarSizeInBits() - 1, SL, MVT::i32)) |
5229 | : DAG.getConstant(0, SL, VT); |
5230 | SDValue Overflow = DAG.getSetCC(SL, MVT::i1, Top, Sign, ISD::SETNE); |
5231 | |
5232 | return DAG.getMergeValues({ Result, Overflow }, SL); |
5233 | } |
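     | // Worked example (annotation): for smulo(x, 8) the fast path above |
     | // emits r = x << 3 and flags overflow when (r >> 3) != x, using an |
     | // arithmetic shift for the signed case. The general path compares the |
     | // high half from mulh against the expected value: the low result's |
     | // sign bits when signed, all zeros when unsigned. |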
5234 | |
5235 | SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { |
5236 | if (!Subtarget->isTrapHandlerEnabled() || |
5237 | Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) |
5238 | return lowerTrapEndpgm(Op, DAG); |
5239 | |
5240 | if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) { |
5241 | switch (*HsaAbiVer) { |
5242 | case ELF::ELFABIVERSION_AMDGPU_HSA_V2: |
5243 | case ELF::ELFABIVERSION_AMDGPU_HSA_V3: |
5244 | return lowerTrapHsaQueuePtr(Op, DAG); |
5245 | case ELF::ELFABIVERSION_AMDGPU_HSA_V4: |
5246 | return Subtarget->supportsGetDoorbellID() ? |
5247 | lowerTrapHsa(Op, DAG) : lowerTrapHsaQueuePtr(Op, DAG); |
5248 | } |
5249 | } |
5250 | |
5251 | llvm_unreachable("Unknown trap handler"); |
5252 | } |
5253 | |
5254 | SDValue SITargetLowering::lowerTrapEndpgm( |
5255 | SDValue Op, SelectionDAG &DAG) const { |
5256 | SDLoc SL(Op); |
5257 | SDValue Chain = Op.getOperand(0); |
5258 | return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); |
5259 | } |
5260 | |
5261 | SDValue SITargetLowering::lowerTrapHsaQueuePtr( |
5262 | SDValue Op, SelectionDAG &DAG) const { |
5263 | SDLoc SL(Op); |
5264 | SDValue Chain = Op.getOperand(0); |
5265 | |
5266 | MachineFunction &MF = DAG.getMachineFunction(); |
5267 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
5268 | Register UserSGPR = Info->getQueuePtrUserSGPR(); |
5269 | assert(UserSGPR != AMDGPU::NoRegister); |
5270 | SDValue QueuePtr = CreateLiveInRegister( |
5271 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
5272 | SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); |
5273 | SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, |
5274 | QueuePtr, SDValue()); |
5275 | |
5276 | uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap); |
5277 | SDValue Ops[] = { |
5278 | ToReg, |
5279 | DAG.getTargetConstant(TrapID, SL, MVT::i16), |
5280 | SGPR01, |
5281 | ToReg.getValue(1) |
5282 | }; |
5283 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
5284 | } |
5285 | |
5286 | SDValue SITargetLowering::lowerTrapHsa( |
5287 | SDValue Op, SelectionDAG &DAG) const { |
5288 | SDLoc SL(Op); |
5289 | SDValue Chain = Op.getOperand(0); |
5290 | |
5291 | uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSATrap); |
5292 | SDValue Ops[] = { |
5293 | Chain, |
5294 | DAG.getTargetConstant(TrapID, SL, MVT::i16) |
5295 | }; |
5296 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
5297 | } |
5298 | |
5299 | SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { |
5300 | SDLoc SL(Op); |
5301 | SDValue Chain = Op.getOperand(0); |
5302 | MachineFunction &MF = DAG.getMachineFunction(); |
5303 | |
5304 | if (!Subtarget->isTrapHandlerEnabled() || |
5305 | Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA) { |
5306 | DiagnosticInfoUnsupported NoTrap(MF.getFunction(), |
5307 | "debugtrap handler not supported", |
5308 | Op.getDebugLoc(), |
5309 | DS_Warning); |
5310 | LLVMContext &Ctx = MF.getFunction().getContext(); |
5311 | Ctx.diagnose(NoTrap); |
5312 | return Chain; |
5313 | } |
5314 | |
5315 | uint64_t TrapID = static_cast<uint64_t>(GCNSubtarget::TrapID::LLVMAMDHSADebugTrap); |
5316 | SDValue Ops[] = { |
5317 | Chain, |
5318 | DAG.getTargetConstant(TrapID, SL, MVT::i16) |
5319 | }; |
5320 | return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); |
5321 | } |
5322 | |
5323 | SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, |
5324 | SelectionDAG &DAG) const { |
5325 | |
5326 | if (Subtarget->hasApertureRegs()) { |
5327 | unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ? |
5328 | AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : |
5329 | AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; |
5330 | unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ? |
5331 | AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : |
5332 | AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; |
5333 | unsigned Encoding = |
5334 | AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | |
5335 | Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | |
5336 | WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; |
5337 | |
5338 | SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); |
5339 | SDValue ApertureReg = SDValue( |
5340 | DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); |
5341 | SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); |
5342 | return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); |
5343 | } |
5344 | |
5345 | MachineFunction &MF = DAG.getMachineFunction(); |
5346 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
5347 | Register UserSGPR = Info->getQueuePtrUserSGPR(); |
5348 | assert(UserSGPR != AMDGPU::NoRegister); |
5349 | |
5350 | SDValue QueuePtr = CreateLiveInRegister( |
5351 | DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); |
5352 | |
5353 | // Offset into amd_queue_t for group_segment_aperture_base_hi / |
5354 | // private_segment_aperture_base_hi. |
5355 | uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44; |
5356 | |
5357 | SDValue Ptr = |
5358 | DAG.getObjectPtrOffset(DL, QueuePtr, TypeSize::Fixed(StructOffset)); |
5359 | |
5360 | // TODO: Use a custom target PseudoSourceValue. |
5361 | // TODO: We should use the value from the IR intrinsic call, but it |
5362 | // might not be available and how do we get it? |
5363 | MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS); |
5364 | return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, |
5365 | commonAlignment(Align(64), StructOffset), |
5366 | MachineMemOperand::MODereferenceable | |
5367 | MachineMemOperand::MOInvariant); |
5368 | } |
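     | // Worked encoding example (annotation): with aperture registers, the |
     | // s_getreg operand packs the hwreg id and field position as |
     | //   Encoding = (ID_MEM_BASES << ID_SHIFT_) |
     | //            | (Offset << OFFSET_SHIFT_) |
     | //            | (WidthM1 << WIDTH_M1_SHIFT_) |
     | // and the returned field is shifted left by WidthM1 + 1 to rebuild the |
     | // high 32 bits of the 64-bit aperture base. |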
5369 | |
5370 | SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, |
5371 | SelectionDAG &DAG) const { |
5372 | SDLoc SL(Op); |
5373 | const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); |
5374 | |
5375 | SDValue Src = ASC->getOperand(0); |
5376 | SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); |
5377 | |
5378 | const AMDGPUTargetMachine &TM = |
5379 | static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); |
5380 | |
5381 | // flat -> local/private |
5382 | if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
5383 | unsigned DestAS = ASC->getDestAddressSpace(); |
5384 | |
5385 | if (DestAS == AMDGPUAS::LOCAL_ADDRESS || |
5386 | DestAS == AMDGPUAS::PRIVATE_ADDRESS) { |
5387 | unsigned NullVal = TM.getNullPointerValue(DestAS); |
5388 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
5389 | SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); |
5390 | SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
5391 | |
5392 | return DAG.getNode(ISD::SELECT, SL, MVT::i32, |
5393 | NonNull, Ptr, SegmentNullPtr); |
5394 | } |
5395 | } |
5396 | |
5397 | // local/private -> flat |
5398 | if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) { |
5399 | unsigned SrcAS = ASC->getSrcAddressSpace(); |
5400 | |
5401 | if (SrcAS == AMDGPUAS::LOCAL_ADDRESS || |
5402 | SrcAS == AMDGPUAS::PRIVATE_ADDRESS) { |
5403 | unsigned NullVal = TM.getNullPointerValue(SrcAS); |
5404 | SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); |
5405 | |
5406 | SDValue NonNull |
5407 | = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); |
5408 | |
5409 | SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); |
5410 | SDValue CvtPtr |
5411 | = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); |
5412 | |
5413 | return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, |
5414 | DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr), |
5415 | FlatNullPtr); |
5416 | } |
5417 | } |
5418 | |
5419 | if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT && |
5420 | Src.getValueType() == MVT::i64) |
5421 | return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); |
5422 | |
5423 | // global <-> flat are no-ops and never emitted; anything else reaching |
5424 | // this point is an invalid cast, so diagnose it and return undef. |
5425 | const MachineFunction &MF = DAG.getMachineFunction(); |
5426 | DiagnosticInfoUnsupported InvalidAddrSpaceCast( |
5427 | MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); |
5428 | DAG.getContext()->diagnose(InvalidAddrSpaceCast); |
5429 | |
5430 | return DAG.getUNDEF(ASC->getValueType(0)); |
5431 | } |
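     | // Illustrative summary (annotation) of the flat -> local/private path: |
     | //   nonnull = (src64 != flat_null) |
     | //   ptr32   = trunc src64 to i32 |
     | //   result  = select nonnull, ptr32, segment_null |
     | // The reverse direction rebuilds a 64-bit pointer from the 32-bit |
     | // offset and the aperture base via a v2i32 build_vector and bitcast, |
     | // again guarded by a null check. |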
5432 | |
5433 | |
5434 | |
5435 | // Lower INSERT_SUBVECTOR by extracting the individual elements of the |
5436 | // inserted subvector and inserting them into the wider vector one at a |
5437 | // time at constant offsets. |
5438 | SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, |
5439 | SelectionDAG &DAG) const { |
5440 | SDValue Vec = Op.getOperand(0); |
5441 | SDValue Ins = Op.getOperand(1); |
5442 | SDValue Idx = Op.getOperand(2); |
5443 | EVT VecVT = Vec.getValueType(); |
5444 | EVT InsVT = Ins.getValueType(); |
5445 | EVT EltVT = VecVT.getVectorElementType(); |
5446 | unsigned InsNumElts = InsVT.getVectorNumElements(); |
5447 | unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); |
5448 | SDLoc SL(Op); |
5449 | |
5450 | for (unsigned I = 0; I != InsNumElts; ++I) { |
5451 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins, |
5452 | DAG.getConstant(I, SL, MVT::i32)); |
5453 | Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt, |
5454 | DAG.getConstant(IdxVal + I, SL, MVT::i32)); |
5455 | } |
5456 | return Vec; |
5457 | } |
5458 | |
5459 | SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, |
5460 | SelectionDAG &DAG) const { |
5461 | SDValue Vec = Op.getOperand(0); |
5462 | SDValue InsVal = Op.getOperand(1); |
5463 | SDValue Idx = Op.getOperand(2); |
5464 | EVT VecVT = Vec.getValueType(); |
5465 | EVT EltVT = VecVT.getVectorElementType(); |
5466 | unsigned VecSize = VecVT.getSizeInBits(); |
5467 | unsigned EltSize = EltVT.getSizeInBits(); |
5468 | |
5469 | |
5470 | assert(VecSize <= 64); |
5471 | |
5472 | unsigned NumElts = VecVT.getVectorNumElements(); |
5473 | SDLoc SL(Op); |
5474 | auto KIdx = dyn_cast<ConstantSDNode>(Idx); |
5475 | |
5476 | if (NumElts == 4 && EltSize == 16 && KIdx) { |
5477 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); |
5478 | |
5479 | SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
5480 | DAG.getConstant(0, SL, MVT::i32)); |
5481 | SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, |
5482 | DAG.getConstant(1, SL, MVT::i32)); |
5483 | |
5484 | SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); |
5485 | SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); |
5486 | |
5487 | unsigned Idx = KIdx->getZExtValue(); |
5488 | bool InsertLo = Idx < 2; |
5489 | SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, |
5490 | InsertLo ? LoVec : HiVec, |
5491 | DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), |
5492 | DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); |
5493 | |
5494 | InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); |
5495 | |
5496 | SDValue Concat = InsertLo ? |
5497 | DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : |
5498 | DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); |
5499 | |
5500 | return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); |
5501 | } |
5502 | |
5503 | if (isa<ConstantSDNode>(Idx)) |
5504 | return SDValue(); |
5505 | |
5506 | MVT IntVT = MVT::getIntegerVT(VecSize); |
5507 | |
5508 | // Avoid stack access for dynamic indexing by lowering to bitfield ops: |
5509 | //   v_bfi_b32 (v_bfm_b32 EltSize, (shl idx, log2 EltSize)), val, vec |
5510 | // |
5511 | // Create a congruent vector with the inserted value in every element so |
5512 | // the required lane can be masked and ORed into the target vector. |
5513 | SDValue ExtVal = DAG.getNode(ISD::BITCAST, SL, IntVT, |
5514 | DAG.getSplatBuildVector(VecVT, SL, InsVal)); |
5515 | |
5516 | assert(isPowerOf2_32(EltSize)); |
5517 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
5518 | |
5519 | // Convert the vector index to a bit index (multiply by element size). |
5520 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
5521 | |
5522 | SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
5523 | SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, |
5524 | DAG.getConstant(0xffff, SL, IntVT), |
5525 | ScaledIdx); |
5526 | |
5527 | SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); |
5528 | SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, |
5529 | DAG.getNOT(SL, BFM, IntVT), BCVec); |
5530 | |
5531 | SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); |
5532 | return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); |
5533 | } |
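     | // Worked example (annotation): inserting a 16-bit value at a dynamic |
     | // index idx into a v4i16 (64-bit) vector with the code above: |
     | //   bitidx = idx << 4 |
     | //   bfm    = 0xffff << bitidx          ; selects the target lane |
     | //   splat  = bitcast(splat(val)) : i64 ; val in every lane |
     | //   result = (bfm & splat) | (~bfm & vec) |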
5534 | |
5535 | SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, |
5536 | SelectionDAG &DAG) const { |
5537 | SDLoc SL(Op); |
5538 | |
5539 | EVT ResultVT = Op.getValueType(); |
5540 | SDValue Vec = Op.getOperand(0); |
5541 | SDValue Idx = Op.getOperand(1); |
5542 | EVT VecVT = Vec.getValueType(); |
5543 | unsigned VecSize = VecVT.getSizeInBits(); |
5544 | EVT EltVT = VecVT.getVectorElementType(); |
5545 | assert(VecSize <= 64); |
5546 | |
5547 | DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); |
5548 | |
5549 | // Run the extract-element combine first: it performs the optimizations |
5550 | // that make it easier to fold source modifiers before the access is |
5551 | // obscured by the bit operations below. |
5552 | |
5553 | if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) |
5554 | return Combined; |
5555 | |
5556 | unsigned EltSize = EltVT.getSizeInBits(); |
5557 | assert(isPowerOf2_32(EltSize)); |
5558 | |
5559 | MVT IntVT = MVT::getIntegerVT(VecSize); |
5560 | SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); |
5561 | |
5562 | // Convert the vector index to a bit index. |
5563 | SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); |
5564 | |
5565 | SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); |
5566 | SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); |
5567 | |
5568 | if (ResultVT == MVT::f16) { |
5569 | SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); |
5570 | return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); |
5571 | } |
5572 | |
5573 | return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); |
5574 | } |
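     | // The dynamic extract is the dual of the dynamic insert above |
     | // (annotation): shift the bitcast integer right by the scaled index |
     | //   elt = (bitcast vec to iN) >> (idx << log2(EltSize)) |
     | // and truncate or any-extend to the requested result type. |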
5575 | |
5576 | static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { |
5577 | assert(Elt % 2 == 0); |
5578 | return Mask[Elt + 1] == Mask[Elt] + 1 && (Mask[Elt] % 2 == 0); |
5579 | } |
5580 | |
5581 | SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, |
5582 | SelectionDAG &DAG) const { |
5583 | SDLoc SL(Op); |
5584 | EVT ResultVT = Op.getValueType(); |
5585 | ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); |
5586 | |
5587 | EVT PackVT = ResultVT.isInteger() ? MVT::v2i16 : MVT::v2f16; |
5588 | EVT EltVT = PackVT.getVectorElementType(); |
5589 | int SrcNumElts = Op.getOperand(0).getValueType().getVectorNumElements(); |
5590 | |
5591 | // Lower the shuffle as a series of packed v2 pieces, e.g.: |
5592 | // |
5593 | //   vector_shuffle <0,1,6,7> lhs, rhs |
5594 | //    -> concat_vectors (extract_subvector lhs, 0), (extract_subvector rhs, 2) |
5595 | // |
5596 | //   vector_shuffle <6,7,2,3> lhs, rhs |
5597 | //    -> concat_vectors (extract_subvector rhs, 2), (extract_subvector lhs, 2) |
5598 | // |
5599 | // Pairs that are not contiguous in a single source are instead rebuilt |
5600 | // element by element into a new packed pair. |
5601 | SmallVector<SDValue, 4> Pieces; |
5602 | for (int I = 0, N = ResultVT.getVectorNumElements(); I != N; I += 2) { |
5603 | if (elementPairIsContiguous(SVN->getMask(), I)) { |
5604 | const int Idx = SVN->getMaskElt(I); |
5605 | int VecIdx = Idx < SrcNumElts ? 0 : 1; |
5606 | int EltIdx = Idx < SrcNumElts ? Idx : Idx - SrcNumElts; |
5607 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, |
5608 | PackVT, SVN->getOperand(VecIdx), |
5609 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
5610 | Pieces.push_back(SubVec); |
5611 | } else { |
5612 | const int Idx0 = SVN->getMaskElt(I); |
5613 | const int Idx1 = SVN->getMaskElt(I + 1); |
5614 | int VecIdx0 = Idx0 < SrcNumElts ? 0 : 1; |
5615 | int VecIdx1 = Idx1 < SrcNumElts ? 0 : 1; |
5616 | int EltIdx0 = Idx0 < SrcNumElts ? Idx0 : Idx0 - SrcNumElts; |
5617 | int EltIdx1 = Idx1 < SrcNumElts ? Idx1 : Idx1 - SrcNumElts; |
5618 | |
5619 | SDValue Vec0 = SVN->getOperand(VecIdx0); |
5620 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
5621 | Vec0, DAG.getConstant(EltIdx0, SL, MVT::i32)); |
5622 | |
5623 | SDValue Vec1 = SVN->getOperand(VecIdx1); |
5624 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
5625 | Vec1, DAG.getConstant(EltIdx1, SL, MVT::i32)); |
5626 | Pieces.push_back(DAG.getBuildVector(PackVT, SL, { Elt0, Elt1 })); |
5627 | } |
5628 | } |
5629 | |
5630 | return DAG.getNode(ISD::CONCAT_VECTORS, SL, ResultVT, Pieces); |
5631 | } |
5632 | |
5633 | SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, |
5634 | SelectionDAG &DAG) const { |
5635 | SDLoc SL(Op); |
5636 | EVT VT = Op.getValueType(); |
5637 | |
5638 | if (VT == MVT::v4i16 || VT == MVT::v4f16) { |
5639 | EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); |
5640 | |
5641 | // Turn into a pair of packed v2 build_vectors blended through i32 casts. |
5642 | // TODO: Special-case constants materializable with s_mov_b64. |
5643 | SDValue Lo = DAG.getBuildVector(HalfVT, SL, |
5644 | { Op.getOperand(0), Op.getOperand(1) }); |
5645 | SDValue Hi = DAG.getBuildVector(HalfVT, SL, |
5646 | { Op.getOperand(2), Op.getOperand(3) }); |
5647 | |
5648 | SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); |
5649 | SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); |
5650 | |
5651 | SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); |
5652 | return DAG.getNode(ISD::BITCAST, SL, VT, Blend); |
5653 | } |
5654 | |
5655 | assert(VT == MVT::v2f16 || VT == MVT::v2i16); |
5656 | assert(!Subtarget->hasVOP3PInsts() && "this should be legal"); |
5657 | |
5658 | SDValue Lo = Op.getOperand(0); |
5659 | SDValue Hi = Op.getOperand(1); |
5660 | |
5661 | // Avoid adding defined bits with the zero_extend. |
5662 | if (Hi.isUndef()) { |
5663 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
5664 | SDValue ExtLo = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Lo); |
5665 | return DAG.getNode(ISD::BITCAST, SL, VT, ExtLo); |
5666 | } |
5667 | |
5668 | Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); |
5669 | Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); |
5670 | |
5671 | SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, |
5672 | DAG.getConstant(16, SL, MVT::i32)); |
5673 | if (Lo.isUndef()) |
5674 | return DAG.getNode(ISD::BITCAST, SL, VT, ShlHi); |
5675 | |
5676 | Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); |
5677 | Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); |
5678 | |
5679 | SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); |
5680 | return DAG.getNode(ISD::BITCAST, SL, VT, Or); |
5681 | } |
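     | // Worked example (annotation): build_vector v2f16 (lo, hi) with both |
     | // halves defined becomes |
     | //   or (zext (bitcast lo to i16) to i32), |
     | //      (shl (zext (bitcast hi to i16) to i32), 16) |
     | // bitcast back to v2f16; an undef half simply drops its side of the |
     | // OR, leaving an any-extend or a bare shift. |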
5682 | |
5683 | bool |
5684 | SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { |
5685 | // We can fold offsets for anything that doesn't require a GOT relocation. |
5686 | return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS || |
5687 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS || |
5688 | GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) && |
5689 | !shouldEmitGOTReloc(GA->getGlobal()); |
5690 | } |
5691 | |
5692 | static SDValue |
5693 | buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, |
5694 | const SDLoc &DL, int64_t Offset, EVT PtrVT, |
5695 | unsigned GAFlags = SIInstrInfo::MO_NONE) { |
5696 | assert(isInt<32>(Offset + 4) && "32-bit offset is expected!"); |
5697 | // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET node |
5698 | // is lowered to the following code sequence: |
5699 | // |
5700 | // For constant address space (a fixup resolved to a literal): |
5701 | //   s_getpc_b64 s[0:1] |
5702 | //   s_add_u32 s0, s0, $symbol |
5703 | //   s_addc_u32 s1, s1, 0 |
5704 | // |
5705 | //   s_getpc_b64 returns the address of the s_add_u32 instruction and then |
5706 | //   a fixup or relocation is emitted to replace $symbol with a literal |
5707 | //   constant, which is a pc-relative offset from the encoding of the |
5708 | //   $symbol operand to the global variable. |
5709 | // |
5710 | // For global address space (GOT- or rel32-style relocations): |
5711 | //   s_getpc_b64 s[0:1] |
5712 | //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo |
5713 | //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi |
5714 | // |
5715 | //   s_getpc_b64 returns the address of the s_add_u32 instruction and then |
5716 | //   fixups or relocations are emitted to replace $symbol@*@lo and |
5717 | //   $symbol@*@hi with the lower and upper 32 bits of a 64-bit pc-relative |
5718 | //   offset from the encoding of the $symbol operand to the global variable. |
5719 | // |
5720 | // What we want is an offset from the value returned by s_getpc (which is |
5721 | // the address of the s_add_u32 instruction), but the encoding of $symbol |
5722 | // starts 4 bytes after the start of s_add_u32, so the offset would come |
5723 | // out 4 bytes too small; hence the Offset + 4 below. Similarly, the |
5724 | // encoding of $symbol in s_addc_u32 starts 12 bytes after s_getpc_b64 |
5725 | // (8 bytes for s_getpc_b64 itself plus 4 for s_add_u32), hence Offset + 12. |
5726 | |
5727 | |
5728 | |
5729 | SDValue PtrLo = |
5730 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, GAFlags); |
5731 | SDValue PtrHi; |
5732 | if (GAFlags == SIInstrInfo::MO_NONE) { |
5733 | PtrHi = DAG.getTargetConstant(0, DL, MVT::i32); |
5734 | } else { |
5735 | PtrHi = |
5736 | DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 12, GAFlags + 1); |
5737 | } |
5738 | return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); |
5739 | } |
5740 | |
5741 | SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, |
5742 | SDValue Op, |
5743 | SelectionDAG &DAG) const { |
5744 | GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); |
5745 | SDLoc DL(GSD); |
5746 | EVT PtrVT = Op.getValueType(); |
5747 | |
5748 | const GlobalValue *GV = GSD->getGlobal(); |
5749 | if ((GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && |
5750 | shouldUseLDSConstAddress(GV)) || |
5751 | GSD->getAddressSpace() == AMDGPUAS::REGION_ADDRESS || |
5752 | GSD->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) { |
5753 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && |
5754 | GV->hasExternalLinkage()) { |
5755 | Type *Ty = GV->getValueType(); |
5756 | |
5757 | // HIP uses an unsized array, e.g. `extern __shared__ T s[]` (or a |
5758 | // zero-sized type in other languages), to declare dynamic shared memory |
5759 | // whose size is unknown at compile time. It is allocated by the runtime |
5760 | // directly after the static LDS allocations, all sharing one offset. |
5761 | if (DAG.getDataLayout().getTypeAllocSize(Ty).isZero()) { |
5762 | assert(PtrVT == MVT::i32 && "32-bit pointer is expected."); |
5763 | |
5764 | MFI->setDynLDSAlign(DAG.getDataLayout(), *cast<GlobalVariable>(GV)); |
5765 | return SDValue( |
5766 | DAG.getMachineNode(AMDGPU::GET_GROUPSTATICSIZE, DL, PtrVT), 0); |
5767 | } |
5768 | } |
5769 | return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); |
5770 | } |
5771 | |
5772 | if (GSD->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { |
5773 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, GSD->getOffset(), |
5774 | SIInstrInfo::MO_ABS32_LO); |
5775 | return DAG.getNode(AMDGPUISD::LDS, DL, MVT::i32, GA); |
5776 | } |
5777 | |
5778 | if (shouldEmitFixup(GV)) |
5779 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); |
5780 | else if (shouldEmitPCReloc(GV)) |
5781 | return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, |
5782 | SIInstrInfo::MO_REL32); |
5783 | |
5784 | SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, |
5785 | SIInstrInfo::MO_GOTPCREL32); |
5786 | |
5787 | Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); |
5788 | PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS); |
5789 | const DataLayout &DataLayout = DAG.getDataLayout(); |
5790 | Align Alignment = DataLayout.getABITypeAlign(PtrTy); |
5791 | MachinePointerInfo PtrInfo |
5792 | = MachinePointerInfo::getGOT(DAG.getMachineFunction()); |
5793 | |
5794 | return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Alignment, |
5795 | MachineMemOperand::MODereferenceable | |
5796 | MachineMemOperand::MOInvariant); |
5797 | } |
5798 | |
5799 | SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, |
5800 | const SDLoc &DL, SDValue V) const { |
5801 | // Initialize M0 through the SI_INIT_M0 pseudo rather than a plain |
5802 | // CopyToReg: the glue result keeps the initialization immediately |
5803 | // adjacent to its user, and later passes can coalesce or CSE redundant |
5804 | // initializations. |
5805 | |
5806 | |
5807 | |
5808 | |
5809 | |
5810 | SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, |
5811 | V, Chain); |
5812 | return SDValue(M0, 0); |
5813 | } |
5814 | |
5815 | SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, |
5816 | SDValue Op, |
5817 | MVT VT, |
5818 | unsigned Offset) const { |
5819 | SDLoc SL(Op); |
5820 | SDValue Param = lowerKernargMemParameter( |
5821 | DAG, MVT::i32, MVT::i32, SL, DAG.getEntryNode(), Offset, Align(4), false); |
5822 | |
5823 | return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, |
5824 | DAG.getValueType(VT)); |
5825 | } |
5826 | |
5827 | static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
5828 | EVT VT) { |
5829 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
5830 | "non-hsa intrinsic with hsa target", |
5831 | DL.getDebugLoc()); |
5832 | DAG.getContext()->diagnose(BadIntrin); |
5833 | return DAG.getUNDEF(VT); |
5834 | } |
5835 | |
5836 | static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, |
5837 | EVT VT) { |
5838 | DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), |
5839 | "intrinsic not supported on subtarget", |
5840 | DL.getDebugLoc()); |
5841 | DAG.getContext()->diagnose(BadIntrin); |
5842 | return DAG.getUNDEF(VT); |
5843 | } |
5844 | |
5845 | static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, |
5846 | ArrayRef<SDValue> Elts) { |
5847 | assert(!Elts.empty()); |
5848 | MVT Type; |
5849 | unsigned NumElts = Elts.size(); |
5850 | |
5851 | if (NumElts <= 8) { |
5852 | Type = MVT::getVectorVT(MVT::f32, NumElts); |
5853 | } else { |
5854 | assert(Elts.size() <= 16); |
5855 | Type = MVT::v16f32; |
5856 | NumElts = 16; |
5857 | } |
5858 | |
5859 | SmallVector<SDValue, 16> VecElts(NumElts); |
5860 | for (unsigned i = 0; i < Elts.size(); ++i) { |
5861 | SDValue Elt = Elts[i]; |
5862 | if (Elt.getValueType() != MVT::f32) |
5863 | Elt = DAG.getBitcast(MVT::f32, Elt); |
5864 | VecElts[i] = Elt; |
5865 | } |
5866 | for (unsigned i = Elts.size(); i < NumElts; ++i) |
5867 | VecElts[i] = DAG.getUNDEF(MVT::f32); |
5868 | |
5869 | if (NumElts == 1) |
5870 | return VecElts[0]; |
5871 | return DAG.getBuildVector(Type, DL, VecElts); |
5872 | } |
5873 | |
5874 | static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT, |
5875 | SDValue Src, int ExtraElts) { |
5876 | EVT SrcVT = Src.getValueType(); |
5877 | |
5878 | SmallVector<SDValue, 8> Elts; |
5879 | |
5880 | if (SrcVT.isVector()) |
5881 | DAG.ExtractVectorElements(Src, Elts); |
5882 | else |
5883 | Elts.push_back(Src); |
5884 | |
5885 | SDValue Undef = DAG.getUNDEF(SrcVT.getScalarType()); |
5886 | while (ExtraElts--) |
5887 | Elts.push_back(Undef); |
5888 | |
5889 | return DAG.getBuildVector(CastVT, DL, Elts); |
5890 | } |
5891 | |
5892 | // Reassemble the raw dword result of an image instruction into the type |
5893 | // the intrinsic returns, accounting for D16 packing, the dmask popcount, |
5894 | // and an optional trailing TFE/LWE status dword. |
5895 | static SDValue constructRetValue(SelectionDAG &DAG, |
5896 | MachineSDNode *Result, |
5897 | ArrayRef<EVT> ResultTypes, |
5898 | bool IsTexFail, bool Unpacked, bool IsD16, |
5899 | int DMaskPop, int NumVDataDwords, |
5900 | const SDLoc &DL) { |
5901 | |
5902 | EVT ReqRetVT = ResultTypes[0]; |
5903 | int ReqRetNumElts = ReqRetVT.isVector() ? ReqRetVT.getVectorNumElements() : 1; |
5904 | int NumDataDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
5905 | ReqRetNumElts : (ReqRetNumElts + 1) / 2; |
5906 | |
5907 | int MaskPopDwords = (!IsD16 || (IsD16 && Unpacked)) ? |
5908 | DMaskPop : (DMaskPop + 1) / 2; |
5909 | |
5910 | MVT DataDwordVT = NumDataDwords == 1 ? |
5911 | MVT::i32 : MVT::getVectorVT(MVT::i32, NumDataDwords); |
5912 | |
5913 | MVT MaskPopVT = MaskPopDwords == 1 ? |
5914 | MVT::i32 : MVT::getVectorVT(MVT::i32, MaskPopDwords); |
5915 | |
5916 | SDValue Data(Result, 0); |
5917 | SDValue TexFail; |
5918 | |
5919 | if (DMaskPop > 0 && Data.getValueType() != MaskPopVT) { |
5920 | SDValue ZeroIdx = DAG.getConstant(0, DL, MVT::i32); |
5921 | if (MaskPopVT.isVector()) { |
5922 | Data = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MaskPopVT, |
5923 | SDValue(Result, 0), ZeroIdx); |
5924 | } else { |
5925 | Data = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MaskPopVT, |
5926 | SDValue(Result, 0), ZeroIdx); |
5927 | } |
5928 | } |
5929 | |
5930 | if (DataDwordVT.isVector()) |
5931 | Data = padEltsToUndef(DAG, DL, DataDwordVT, Data, |
5932 | NumDataDwords - MaskPopDwords); |
5933 | |
5934 | if (IsD16) |
5935 | Data = adjustLoadValueTypeImpl(Data, ReqRetVT, DL, DAG, Unpacked); |
5936 | |
5937 | EVT LegalReqRetVT = ReqRetVT; |
5938 | if (!ReqRetVT.isVector()) { |
5939 | Data = DAG.getNode(ISD::TRUNCATE, DL, ReqRetVT.changeTypeToInteger(), Data); |
5940 | } else { |
5941 | |
5942 | if ((ReqRetVT.getVectorNumElements() % 2) == 1 && |
5943 | ReqRetVT.getVectorElementType().getSizeInBits() == 16) { |
5944 | LegalReqRetVT = |
5945 | EVT::getVectorVT(*DAG.getContext(), ReqRetVT.getVectorElementType(), |
5946 | ReqRetVT.getVectorNumElements() + 1); |
5947 | } |
5948 | } |
5949 | Data = DAG.getNode(ISD::BITCAST, DL, LegalReqRetVT, Data); |
5950 | |
5951 | if (IsTexFail) { |
5952 | TexFail = |
5953 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, SDValue(Result, 0), |
5954 | DAG.getConstant(MaskPopDwords, DL, MVT::i32)); |
5955 | |
5956 | return DAG.getMergeValues({Data, TexFail, SDValue(Result, 1)}, DL); |
5957 | } |
5958 | |
5959 | if (Result->getNumValues() == 1) |
5960 | return Data; |
5961 | |
5962 | return DAG.getMergeValues({Data, SDValue(Result, 1)}, DL); |
5963 | } |
5964 | |
5965 | static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE, |
5966 | SDValue *LWE, bool &IsTexFail) { |
5967 | auto TexFailCtrlConst = cast<ConstantSDNode>(TexFailCtrl.getNode()); |
5968 | |
5969 | uint64_t Value = TexFailCtrlConst->getZExtValue(); |
5970 | if (Value) { |
5971 | IsTexFail = true; |
5972 | } |
5973 | |
5974 | SDLoc DL(TexFailCtrlConst); |
5975 | *TFE = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); |
5976 | Value &= ~(uint64_t)0x1; |
5977 | *LWE = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); |
5978 | Value &= ~(uint64_t)0x2; |
5979 | |
5980 | return Value == 0; |
5981 | } |
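     | // Note (annotation): bit 0 of the texfailctrl immediate enables TFE |
     | // (texture fail) and bit 1 enables LWE (LOD warning); parseTexFail |
     | // returns false when any other bit is set, which callers treat as an |
     | // invalid intrinsic. |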
5982 | |
5983 | static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op, |
5984 | MVT PackVectorVT, |
5985 | SmallVectorImpl<SDValue> &PackedAddrs, |
5986 | unsigned DimIdx, unsigned EndIdx, |
5987 | unsigned NumGradients) { |
5988 | SDLoc DL(Op); |
5989 | for (unsigned I = DimIdx; I < EndIdx; I++) { |
5990 | SDValue Addr = Op.getOperand(I); |
5991 | // Pairs of 16-bit address components are packed into a single dword. |
5992 | // The final component of an odd-length group (the whole range, or |
5993 | // either half of an odd gradient set) cannot be paired, so it is |
5994 | // any-extended to a full dword on its own instead. |
5995 | |
5996 | |
5997 | if (((I + 1) >= EndIdx) || |
5998 | ((NumGradients / 2) % 2 == 1 && (I == DimIdx + (NumGradients / 2) - 1 || |
5999 | I == DimIdx + NumGradients - 1))) { |
6000 | if (Addr.getValueType() != MVT::i16) |
6001 | Addr = DAG.getBitcast(MVT::i16, Addr); |
6002 | Addr = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Addr); |
6003 | } else { |
6004 | Addr = DAG.getBuildVector(PackVectorVT, DL, {Addr, Op.getOperand(I + 1)}); |
6005 | I++; |
6006 | } |
6007 | Addr = DAG.getBitcast(MVT::f32, Addr); |
6008 | PackedAddrs.push_back(Addr); |
6009 | } |
6010 | } |
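     | // Worked example (annotation): a 2D gradient set {du/dx, dv/dx, du/dy, |
     | // dv/dy} (NumGradients = 4) packs into two dwords, {du/dx, dv/dx} and |
     | // {du/dy, dv/dy}. A 1D set (NumGradients = 2, one component per |
     | // direction) trips the odd-group check above, so each lone component |
     | // is any-extended into its own dword instead. |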
6011 | |
6012 | SDValue SITargetLowering::lowerImage(SDValue Op, |
6013 | const AMDGPU::ImageDimIntrinsicInfo *Intr, |
6014 | SelectionDAG &DAG, bool WithChain) const { |
6015 | SDLoc DL(Op); |
6016 | MachineFunction &MF = DAG.getMachineFunction(); |
6017 | const GCNSubtarget* ST = &MF.getSubtarget<GCNSubtarget>(); |
6018 | const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
6019 | AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
6020 | const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); |
6021 | const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = |
6022 | AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); |
6023 | const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = |
6024 | AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); |
6025 | unsigned IntrOpcode = Intr->BaseOpcode; |
6026 | bool IsGFX10Plus = AMDGPU::isGFX10Plus(*Subtarget); |
6027 | |
6028 | SmallVector<EVT, 3> ResultTypes(Op->values()); |
6029 | SmallVector<EVT, 3> OrigResultTypes(Op->values()); |
6030 | bool IsD16 = false; |
6031 | bool IsG16 = false; |
6032 | bool IsA16 = false; |
6033 | SDValue VData; |
6034 | int NumVDataDwords; |
6035 | bool AdjustRetType = false; |
6036 | |
6037 | // First intrinsic argument: after the intrinsic ID and, if present, the chain. |
6038 | const unsigned ArgOffset = WithChain ? 2 : 1; |
6039 | |
6040 | unsigned DMask; |
6041 | unsigned DMaskLanes = 0; |
6042 | |
6043 | if (BaseOpcode->Atomic) { |
6044 | VData = Op.getOperand(2); |
6045 | |
6046 | bool Is64Bit = VData.getValueType() == MVT::i64; |
6047 | if (BaseOpcode->AtomicX2) { |
6048 | SDValue VData2 = Op.getOperand(3); |
6049 | VData = DAG.getBuildVector(Is64Bit ? MVT::v2i64 : MVT::v2i32, DL, |
6050 | {VData, VData2}); |
6051 | if (Is64Bit) |
6052 | VData = DAG.getBitcast(MVT::v4i32, VData); |
6053 | |
6054 | ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; |
6055 | DMask = Is64Bit ? 0xf : 0x3; |
6056 | NumVDataDwords = Is64Bit ? 4 : 2; |
6057 | } else { |
6058 | DMask = Is64Bit ? 0x3 : 0x1; |
6059 | NumVDataDwords = Is64Bit ? 2 : 1; |
6060 | } |
6061 | } else { |
6062 | auto *DMaskConst = |
6063 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->DMaskIndex)); |
6064 | DMask = DMaskConst->getZExtValue(); |
6065 | DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); |
6066 | |
6067 | if (BaseOpcode->Store) { |
6068 | VData = Op.getOperand(2); |
6069 | |
6070 | MVT StoreVT = VData.getSimpleValueType(); |
6071 | if (StoreVT.getScalarType() == MVT::f16) { |
6072 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
6073 | return Op; |
6074 | |
6075 | IsD16 = true; |
6076 | VData = handleD16VData(VData, DAG, true); |
6077 | } |
6078 | |
6079 | NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; |
6080 | } else { |
6081 | // Work out the number of dwords from the dmask popcount and the |
6082 | // underlying type, and whether d16 packing is supported. |
6083 | MVT LoadVT = ResultTypes[0].getSimpleVT(); |
6084 | if (LoadVT.getScalarType() == MVT::f16) { |
6085 | if (!Subtarget->hasD16Images() || !BaseOpcode->HasD16) |
6086 | return Op; |
6087 | |
6088 | IsD16 = true; |
6089 | } |
6090 | |
6091 | // Confirm the return type is large enough for the dmask specified. |
6092 | if ((LoadVT.isVector() && LoadVT.getVectorNumElements() < DMaskLanes) || |
6093 | (!LoadVT.isVector() && DMaskLanes > 1)) |
6094 | return Op; |
6095 | |
6096 | |
6097 | // With packed d16 VMem two 16-bit lanes share a dword; subtargets |
6098 | // with the gather4 d16 bug return unpacked data, one lane per dword. |
6099 | if (IsD16 && !Subtarget->hasUnpackedD16VMem() && |
6100 | !(BaseOpcode->Gather4 && Subtarget->hasImageGather4D16Bug())) |
6101 | NumVDataDwords = (DMaskLanes + 1) / 2; |
6102 | else |
6103 | NumVDataDwords = DMaskLanes; |
6104 | |
6105 | AdjustRetType = true; |
6106 | } |
6107 | } |
6108 | |
6109 | unsigned VAddrEnd = ArgOffset + Intr->VAddrEnd; |
6110 | SmallVector<SDValue, 4> VAddrs; |
6111 | |
6112 | // Optimize _L to _LZ when _L is zero. |
6113 | if (LZMappingInfo) { |
6114 | if (auto *ConstantLod = dyn_cast<ConstantFPSDNode>( |
6115 | Op.getOperand(ArgOffset + Intr->LodIndex))) { |
6116 | if (ConstantLod->isZero() || ConstantLod->isNegative()) { |
6117 | IntrOpcode = LZMappingInfo->LZ; |
6118 | VAddrEnd--; |
6119 | } |
6120 | } |
6121 | } |
6122 | |
6123 | // Optimize _mip away when the LOD is zero. |
6124 | if (MIPMappingInfo) { |
6125 | if (auto *ConstantLod = dyn_cast<ConstantSDNode>( |
6126 | Op.getOperand(ArgOffset + Intr->MipIndex))) { |
6127 | if (ConstantLod->isNullValue()) { |
6128 | IntrOpcode = MIPMappingInfo->NONMIP; |
6129 | VAddrEnd--; |
6130 | } |
6131 | } |
6132 | } |
6133 | |
6134 | // Push back extra arguments (everything before the gradients). |
6135 | for (unsigned I = Intr->VAddrStart; I < Intr->GradientStart; I++) |
6136 | VAddrs.push_back(Op.getOperand(ArgOffset + I)); |
6137 | |
6138 | // Check for 16-bit gradients / addresses and pack them when found. |
6139 | MVT VAddrVT = |
6140 | Op.getOperand(ArgOffset + Intr->GradientStart).getSimpleValueType(); |
6141 | MVT VAddrScalarVT = VAddrVT.getScalarType(); |
6142 | MVT GradPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; |
6143 | IsG16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; |
6144 | |
6145 | VAddrVT = Op.getOperand(ArgOffset + Intr->CoordStart).getSimpleValueType(); |
6146 | VAddrScalarVT = VAddrVT.getScalarType(); |
6147 | MVT AddrPackVectorVT = VAddrScalarVT == MVT::f16 ? MVT::v2f16 : MVT::v2i16; |
6148 | IsA16 = VAddrScalarVT == MVT::f16 || VAddrScalarVT == MVT::i16; |
6149 | |
6150 | if (BaseOpcode->Gradients && !ST->hasG16() && (IsA16 != IsG16)) { |
6151 | // 16-bit gradients are tied to the A16 control on this subtarget, |
6152 | // so gradients and addresses must both be 16-bit or both 32-bit. |
6153 | LLVM_DEBUG( |
6154 | dbgs() << "Failed to lower image intrinsic: 16 bit addresses " |
6155 | "require 16 bit args for both gradients and addresses"); |
6156 | return Op; |
6157 | } |
6158 | |
6159 | if (IsA16) { |
6160 | if (!ST->hasA16()) { |
6161 | LLVM_DEBUG(dbgs() << "Failed to lower image intrinsic: Target does not " |
6162 | "support 16 bit addresses\n"); |
6163 | return Op; |
6164 | } |
6165 | } |
6166 | |
6167 | |
6168 | // Invalid combinations were rejected above, so if IsA16 or IsG16 |
6169 | // is set here the corresponding operands (address, gradient, or |
6170 | // both) must be packed into dword pairs. Where A16 and gradients |
6171 | // are tied (no G16 support), IsA16 implies IsG16. |
6172 | if (BaseOpcode->Gradients && IsG16 && ST->hasG16()) { |
6173 | // Activate g16. |
6174 | const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = |
6175 | AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); |
6176 | IntrOpcode = G16MappingInfo->G16; |
6177 | } |
6178 | |
6179 | // Add gradients (packed or unpacked). |
6180 | if (IsG16) { |
6181 | // Pack the gradients: 16-bit gradients are packed into dword |
6182 | // pairs separately from the rest of the address. |
6183 | packImage16bitOpsToDwords(DAG, Op, GradPackVectorVT, VAddrs, |
6184 | ArgOffset + Intr->GradientStart, |
6185 | ArgOffset + Intr->CoordStart, Intr->NumGradients); |
6186 | } else { |
6187 | for (unsigned I = ArgOffset + Intr->GradientStart; |
6188 | I < ArgOffset + Intr->CoordStart; I++) |
6189 | VAddrs.push_back(Op.getOperand(I)); |
6190 | } |
6191 | |
6192 | // Add addresses (packed or unpacked). |
6193 | if (IsA16) { |
6194 | packImage16bitOpsToDwords(DAG, Op, AddrPackVectorVT, VAddrs, |
6195 | ArgOffset + Intr->CoordStart, VAddrEnd, |
6196 | 0 /* No gradients */); |
6197 | } else { |
6198 | // Add uncompressed addresses. |
6199 | for (unsigned I = ArgOffset + Intr->CoordStart; I < VAddrEnd; I++) |
6200 | VAddrs.push_back(Op.getOperand(I)); |
6201 | } |
6202 | |
6203 | |
6204 | // Prefer the non-sequential address (NSA) encoding when available: |
6205 | // it lets the register allocator place the address registers |
6206 | // freely instead of forcing them into one contiguous tuple, which |
6207 | // can otherwise cost extra copies. |
6208 | // |
6209 | // NSA is only used with at least three address dwords (smaller |
6210 | // cases fit a contiguous tuple trivially) and no more than the |
6211 | // subtarget's NSA operand limit; otherwise the addresses are |
6212 | // merged into a single contiguous dword vector below. |
6213 | |
6214 | bool UseNSA = ST->hasFeature(AMDGPU::FeatureNSAEncoding) && |
6215 | VAddrs.size() >= 3 && |
6216 | VAddrs.size() <= (unsigned)ST->getNSAMaxSize(); |
6217 | SDValue VAddr; |
6218 | if (!UseNSA) |
6219 | VAddr = getBuildDwordsVector(DAG, DL, VAddrs); |
6220 | |
6221 | SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); |
6222 | SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); |
6223 | SDValue Unorm; |
6224 | if (!BaseOpcode->Sampler) { |
6225 | Unorm = True; |
6226 | } else { |
6227 | auto UnormConst = |
6228 | cast<ConstantSDNode>(Op.getOperand(ArgOffset + Intr->UnormIndex)); |
6229 | |
6230 | Unorm = UnormConst->getZExtValue() ? True : False; |
6231 | } |
6232 | |
6233 | SDValue TFE; |
6234 | SDValue LWE; |
6235 | SDValue TexFail = Op.getOperand(ArgOffset + Intr->TexFailCtrlIndex); |
6236 | bool IsTexFail = false; |
6237 | if (!parseTexFail(TexFail, DAG, &TFE, &LWE, IsTexFail)) |
6238 | return Op; |
6239 | |
6240 | if (IsTexFail) { |
6241 | if (!DMaskLanes) { |
6242 | // With TFE on an error flag is expected even when dmask is zero, |
6243 | // so force dmask to 1 to keep at least one valid return dword. |
6244 | DMask = 0x1; |
6245 | DMaskLanes = 1; |
6246 | NumVDataDwords = 1; |
6247 | } |
6248 | NumVDataDwords += 1; |
6249 | AdjustRetType = true; |
6250 | } |
6251 | |
6252 | // Something earlier flagged the return type as needing adjustment; |
6253 | // rebuild it as the dword vector the instruction really produces. |
6254 | if (AdjustRetType) { |
6255 | // A load with dmask 0 and no TFE/LWE produces nothing: fold to undef. |
6256 | if (DMaskLanes == 0 && !BaseOpcode->Store) { |
6257 | |
6258 | SDValue Undef = DAG.getUNDEF(Op.getValueType()); |
6259 | if (isa<MemSDNode>(Op)) |
6260 | return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); |
6261 | return Undef; |
6262 | } |
6263 | |
6264 | EVT NewVT = NumVDataDwords > 1 ? |
6265 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumVDataDwords) |
6266 | : MVT::i32; |
6267 | |
6268 | ResultTypes[0] = NewVT; |
6269 | if (ResultTypes.size() == 3) { |
6270 | // The original result was an aggregate type used for TexFailCtrl |
6271 | // results; the actual instruction returns the vector type created |
6272 | // above, so drop the aggregate's middle member. |
6273 | ResultTypes.erase(&ResultTypes[1]); |
6274 | } |
6275 | } |
6276 | |
6277 | unsigned CPol = cast<ConstantSDNode>( |
6278 | Op.getOperand(ArgOffset + Intr->CachePolicyIndex))->getZExtValue(); |
6279 | if (BaseOpcode->Atomic) |
6280 | CPol |= AMDGPU::CPol::GLC; |
6281 | if (CPol & ~AMDGPU::CPol::ALL) |
6282 | return Op; |
6283 | |
6284 | SmallVector<SDValue, 26> Ops; |
6285 | if (BaseOpcode->Store || BaseOpcode->Atomic) |
6286 | Ops.push_back(VData); |
6287 | if (UseNSA) |
6288 | append_range(Ops, VAddrs); |
6289 | else |
6290 | Ops.push_back(VAddr); |
6291 | Ops.push_back(Op.getOperand(ArgOffset + Intr->RsrcIndex)); |
6292 | if (BaseOpcode->Sampler) |
6293 | Ops.push_back(Op.getOperand(ArgOffset + Intr->SampIndex)); |
6294 | Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); |
6295 | if (IsGFX10Plus) |
6296 | Ops.push_back(DAG.getTargetConstant(DimInfo->Encoding, DL, MVT::i32)); |
6297 | Ops.push_back(Unorm); |
6298 | Ops.push_back(DAG.getTargetConstant(CPol, DL, MVT::i32)); |
6299 | Ops.push_back(IsA16 && |
6300 | ST->hasFeature(AMDGPU::FeatureR128A16) ? True : False); |
6301 | if (IsGFX10Plus) |
6302 | Ops.push_back(IsA16 ? True : False); |
6303 | if (!Subtarget->hasGFX90AInsts()) { |
6304 | Ops.push_back(TFE); |
6305 | } else if (cast<ConstantSDNode>(TFE)->getZExtValue()) { |
6306 | report_fatal_error("TFE is not supported on this GPU"); |
6307 | } |
6308 | Ops.push_back(LWE); |
6309 | if (!IsGFX10Plus) |
6310 | Ops.push_back(DimInfo->DA ? True : False); |
6311 | if (BaseOpcode->HasD16) |
6312 | Ops.push_back(IsD16 ? True : False); |
6313 | if (isa<MemSDNode>(Op)) |
6314 | Ops.push_back(Op.getOperand(0)); |
6315 | |
6316 | int NumVAddrDwords = |
6317 | UseNSA ? VAddrs.size() : VAddr.getValueType().getSizeInBits() / 32; |
6318 | int Opcode = -1; |
6319 | |
6320 | if (IsGFX10Plus) { |
6321 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
6322 | UseNSA ? AMDGPU::MIMGEncGfx10NSA |
6323 | : AMDGPU::MIMGEncGfx10Default, |
6324 | NumVDataDwords, NumVAddrDwords); |
6325 | } else { |
6326 | if (Subtarget->hasGFX90AInsts()) { |
6327 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a, |
6328 | NumVDataDwords, NumVAddrDwords); |
6329 | if (Opcode == -1) |
6330 | report_fatal_error( |
6331 | "requested image instruction is not supported on this GPU"); |
6332 | } |
6333 | if (Opcode == -1 && |
6334 | Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
6335 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, |
6336 | NumVDataDwords, NumVAddrDwords); |
6337 | if (Opcode == -1) |
6338 | Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, |
6339 | NumVDataDwords, NumVAddrDwords); |
6340 | } |
6341 | assert(Opcode != -1); |
6342 | |
6343 | MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); |
6344 | if (auto MemOp = dyn_cast<MemSDNode>(Op)) { |
6345 | MachineMemOperand *MemRef = MemOp->getMemOperand(); |
6346 | DAG.setNodeMemRefs(NewNode, {MemRef}); |
6347 | } |
6348 | |
6349 | if (BaseOpcode->AtomicX2) { |
6350 | SmallVector<SDValue, 1> Elt; |
6351 | DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); |
6352 | return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); |
6353 | } |
6354 | if (BaseOpcode->Store) |
6355 | return SDValue(NewNode, 0); |
6356 | return constructRetValue(DAG, NewNode, |
6357 | OrigResultTypes, IsTexFail, |
6358 | Subtarget->hasUnpackedD16VMem(), IsD16, |
6359 | DMaskLanes, NumVDataDwords, DL); |
6360 | } |
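     | |
     | // Worked example of the dword accounting above (hypothetical |
     | // inputs): a d16 load with dmask = 0b0111 has DMaskLanes = 3; with |
     | // packed d16 VMem that is NumVDataDwords = (3 + 1) / 2 = 2, and |
     | // enabling TFE appends one more dword for the error flag, so the |
     | // adjusted return type becomes v3i32. |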
6361 | |
6362 | SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, |
6363 | SDValue Offset, SDValue CachePolicy, |
6364 | SelectionDAG &DAG) const { |
6365 | MachineFunction &MF = DAG.getMachineFunction(); |
6366 | |
6367 | const DataLayout &DataLayout = DAG.getDataLayout(); |
6368 | Align Alignment = |
6369 | DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext())); |
6370 | |
6371 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
6372 | MachinePointerInfo(), |
6373 | MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | |
6374 | MachineMemOperand::MOInvariant, |
6375 | VT.getStoreSize(), Alignment); |
6376 | |
6377 | if (!Offset->isDivergent()) { |
6378 | SDValue Ops[] = { |
6379 | Rsrc, |
6380 | Offset, |
6381 | CachePolicy |
6382 | }; |
6383 | |
6384 | // Widen vec3 loads to vec4 and extract the original elements. |
6385 | if (VT.isVector() && VT.getVectorNumElements() == 3) { |
6386 | EVT WidenedVT = |
6387 | EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); |
6388 | auto WidenedOp = DAG.getMemIntrinsicNode( |
6389 | AMDGPUISD::SBUFFER_LOAD, DL, DAG.getVTList(WidenedVT), Ops, WidenedVT, |
6390 | MF.getMachineMemOperand(MMO, 0, WidenedVT.getStoreSize())); |
6391 | auto Subvector = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, WidenedOp, |
6392 | DAG.getVectorIdxConstant(0, DL)); |
6393 | return Subvector; |
6394 | } |
6395 | |
6396 | return DAG.getMemIntrinsicNode(AMDGPUISD::SBUFFER_LOAD, DL, |
6397 | DAG.getVTList(VT), Ops, VT, MMO); |
6398 | } |
6399 | |
6400 | // We have a divergent offset. Emit MUBUF buffer loads instead; |
6401 | // the buffer is assumed to be unswizzled. |
6402 | SmallVector<SDValue, 4> Loads; |
6403 | unsigned NumLoads = 1; |
6404 | MVT LoadVT = VT.getSimpleVT(); |
6405 | unsigned NumElts = LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1; |
6406 | assert((LoadVT.getScalarType() == MVT::i32 || |
6407 | LoadVT.getScalarType() == MVT::f32)); |
6408 | |
6409 | if (NumElts == 8 || NumElts == 16) { |
6410 | NumLoads = NumElts / 4; |
6411 | LoadVT = MVT::getVectorVT(LoadVT.getScalarType(), 4); |
6412 | } |
6413 | |
6414 | SDVTList VTList = DAG.getVTList({LoadVT, MVT::Glue}); |
6415 | SDValue Ops[] = { |
6416 | DAG.getEntryNode(), |
6417 | Rsrc, |
6418 | DAG.getConstant(0, DL, MVT::i32), |
6419 | {}, |
6420 | {}, |
6421 | {}, |
6422 | CachePolicy, |
6423 | DAG.getTargetConstant(0, DL, MVT::i1), |
6424 | }; |
6425 | |
6426 | // Use the alignment to ensure that the required offsets will fit |
6427 | // into the immediate offsets. |
6428 | setBufferOffsets(Offset, DAG, &Ops[3], |
6429 | NumLoads > 1 ? Align(16 * NumLoads) : Align(4)); |
6430 | |
6431 | uint64_t InstOffset = cast<ConstantSDNode>(Ops[5])->getZExtValue(); |
6432 | for (unsigned i = 0; i < NumLoads; ++i) { |
6433 | Ops[5] = DAG.getTargetConstant(InstOffset + 16 * i, DL, MVT::i32); |
6434 | Loads.push_back(getMemIntrinsicNode(AMDGPUISD::BUFFER_LOAD, DL, VTList, Ops, |
6435 | LoadVT, MMO, DAG)); |
6436 | } |
6437 | |
6438 | if (NumElts == 8 || NumElts == 16) |
6439 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Loads); |
6440 | |
6441 | return Loads[0]; |
6442 | } |
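     | |
     | // Sketch of the divergent path (assumed shapes): a v8f32 s_buffer |
     | // load becomes NumLoads = 2 MUBUF v4f32 loads at instoffset + 0 and |
     | // instoffset + 16, re-joined with ISD::CONCAT_VECTORS; a uniform v3 |
     | // load instead widens to v4 and extracts the low three elements. |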
6443 | |
6444 | SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, |
6445 | SelectionDAG &DAG) const { |
6446 | MachineFunction &MF = DAG.getMachineFunction(); |
6447 | auto MFI = MF.getInfo<SIMachineFunctionInfo>(); |
6448 | |
6449 | EVT VT = Op.getValueType(); |
6450 | SDLoc DL(Op); |
6451 | unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
6452 | |
6453 | |
6454 | // TODO: Should this propagate fast-math-flags? |
6455 | switch (IntrinsicID) { |
6456 | case Intrinsic::amdgcn_implicit_buffer_ptr: { |
6457 | if (getSubtarget()->isAmdHsaOrMesa(MF.getFunction())) |
6458 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6459 | return getPreloadedValue(DAG, *MFI, VT, |
6460 | AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); |
6461 | } |
6462 | case Intrinsic::amdgcn_dispatch_ptr: |
6463 | case Intrinsic::amdgcn_queue_ptr: { |
6464 | if (!Subtarget->isAmdHsaOrMesa(MF.getFunction())) { |
6465 | DiagnosticInfoUnsupported BadIntrin( |
6466 | MF.getFunction(), "unsupported hsa intrinsic without hsa target", |
6467 | DL.getDebugLoc()); |
6468 | DAG.getContext()->diagnose(BadIntrin); |
6469 | return DAG.getUNDEF(VT); |
6470 | } |
6471 | |
6472 | auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? |
6473 | AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; |
6474 | return getPreloadedValue(DAG, *MFI, VT, RegID); |
6475 | } |
6476 | case Intrinsic::amdgcn_implicitarg_ptr: { |
6477 | if (MFI->isEntryFunction()) |
6478 | return getImplicitArgPtr(DAG, DL); |
6479 | return getPreloadedValue(DAG, *MFI, VT, |
6480 | AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); |
6481 | } |
6482 | case Intrinsic::amdgcn_kernarg_segment_ptr: { |
6483 | if (!AMDGPU::isKernel(MF.getFunction().getCallingConv())) { |
6484 | // This only makes sense to call in a kernel, so just lower to null. |
6485 | return DAG.getConstant(0, DL, VT); |
6486 | } |
6487 | |
6488 | return getPreloadedValue(DAG, *MFI, VT, |
6489 | AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); |
6490 | } |
6491 | case Intrinsic::amdgcn_dispatch_id: { |
6492 | return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); |
6493 | } |
6494 | case Intrinsic::amdgcn_rcp: |
6495 | return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); |
6496 | case Intrinsic::amdgcn_rsq: |
6497 | return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
6498 | case Intrinsic::amdgcn_rsq_legacy: |
6499 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
6500 | return emitRemovedIntrinsicError(DAG, DL, VT); |
6501 | return SDValue(); |
6502 | case Intrinsic::amdgcn_rcp_legacy: |
6503 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
6504 | return emitRemovedIntrinsicError(DAG, DL, VT); |
6505 | return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); |
6506 | case Intrinsic::amdgcn_rsq_clamp: { |
6507 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
6508 | return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); |
6509 | |
6510 | Type *Type = VT.getTypeForEVT(*DAG.getContext()); |
6511 | APFloat Max = APFloat::getLargest(Type->getFltSemantics()); |
6512 | APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); |
6513 | |
6514 | SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); |
6515 | SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, |
6516 | DAG.getConstantFP(Max, DL, VT)); |
6517 | return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, |
6518 | DAG.getConstantFP(Min, DL, VT)); |
6519 | } |
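     | |
     | // The VI+ expansion above clamps rsq to the finite range, e.g. |
     | // (hypothetically) rsq_clamp(0.0): rsq gives +inf, fminnum(+inf, |
     | // +FLT_MAX) gives +FLT_MAX, and the final fmaxnum leaves it there. |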
6520 | case Intrinsic::r600_read_ngroups_x: |
6521 | if (Subtarget->isAmdHsaOS()) |
6522 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6523 | |
6524 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6525 | SI::KernelInputOffsets::NGROUPS_X, Align(4), |
6526 | false); |
6527 | case Intrinsic::r600_read_ngroups_y: |
6528 | if (Subtarget->isAmdHsaOS()) |
6529 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6530 | |
6531 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6532 | SI::KernelInputOffsets::NGROUPS_Y, Align(4), |
6533 | false); |
6534 | case Intrinsic::r600_read_ngroups_z: |
6535 | if (Subtarget->isAmdHsaOS()) |
6536 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6537 | |
6538 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6539 | SI::KernelInputOffsets::NGROUPS_Z, Align(4), |
6540 | false); |
6541 | case Intrinsic::r600_read_global_size_x: |
6542 | if (Subtarget->isAmdHsaOS()) |
6543 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6544 | |
6545 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6546 | SI::KernelInputOffsets::GLOBAL_SIZE_X, |
6547 | Align(4), false); |
6548 | case Intrinsic::r600_read_global_size_y: |
6549 | if (Subtarget->isAmdHsaOS()) |
6550 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6551 | |
6552 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6553 | SI::KernelInputOffsets::GLOBAL_SIZE_Y, |
6554 | Align(4), false); |
6555 | case Intrinsic::r600_read_global_size_z: |
6556 | if (Subtarget->isAmdHsaOS()) |
6557 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6558 | |
6559 | return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), |
6560 | SI::KernelInputOffsets::GLOBAL_SIZE_Z, |
6561 | Align(4), false); |
6562 | case Intrinsic::r600_read_local_size_x: |
6563 | if (Subtarget->isAmdHsaOS()) |
6564 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6565 | |
6566 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
6567 | SI::KernelInputOffsets::LOCAL_SIZE_X); |
6568 | case Intrinsic::r600_read_local_size_y: |
6569 | if (Subtarget->isAmdHsaOS()) |
6570 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6571 | |
6572 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
6573 | SI::KernelInputOffsets::LOCAL_SIZE_Y); |
6574 | case Intrinsic::r600_read_local_size_z: |
6575 | if (Subtarget->isAmdHsaOS()) |
6576 | return emitNonHSAIntrinsicError(DAG, DL, VT); |
6577 | |
6578 | return lowerImplicitZextParam(DAG, Op, MVT::i16, |
6579 | SI::KernelInputOffsets::LOCAL_SIZE_Z); |
6580 | case Intrinsic::amdgcn_workgroup_id_x: |
6581 | return getPreloadedValue(DAG, *MFI, VT, |
6582 | AMDGPUFunctionArgInfo::WORKGROUP_ID_X); |
6583 | case Intrinsic::amdgcn_workgroup_id_y: |
6584 | return getPreloadedValue(DAG, *MFI, VT, |
6585 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); |
6586 | case Intrinsic::amdgcn_workgroup_id_z: |
6587 | return getPreloadedValue(DAG, *MFI, VT, |
6588 | AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); |
6589 | case Intrinsic::amdgcn_workitem_id_x: |
6590 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
6591 | SDLoc(DAG.getEntryNode()), |
6592 | MFI->getArgInfo().WorkItemIDX); |
6593 | case Intrinsic::amdgcn_workitem_id_y: |
6594 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
6595 | SDLoc(DAG.getEntryNode()), |
6596 | MFI->getArgInfo().WorkItemIDY); |
6597 | case Intrinsic::amdgcn_workitem_id_z: |
6598 | return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, |
6599 | SDLoc(DAG.getEntryNode()), |
6600 | MFI->getArgInfo().WorkItemIDZ); |
6601 | case Intrinsic::amdgcn_wavefrontsize: |
6602 | return DAG.getConstant(MF.getSubtarget<GCNSubtarget>().getWavefrontSize(), |
6603 | SDLoc(Op), MVT::i32); |
6604 | case Intrinsic::amdgcn_s_buffer_load: { |
6605 | unsigned CPol = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); |
6606 | if (CPol & ~AMDGPU::CPol::ALL) |
6607 | return Op; |
6608 | return lowerSBuffer(VT, DL, Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
6609 | DAG); |
6610 | } |
6611 | case Intrinsic::amdgcn_fdiv_fast: |
6612 | return lowerFDIV_FAST(Op, DAG); |
6613 | case Intrinsic::amdgcn_sin: |
6614 | return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); |
6615 | |
6616 | case Intrinsic::amdgcn_cos: |
6617 | return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); |
6618 | |
6619 | case Intrinsic::amdgcn_mul_u24: |
6620 | return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
6621 | case Intrinsic::amdgcn_mul_i24: |
6622 | return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
6623 | |
6624 | case Intrinsic::amdgcn_log_clamp: { |
6625 | if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) |
6626 | return SDValue(); |
6627 | |
6628 | return emitRemovedIntrinsicError(DAG, DL, VT); |
6629 | } |
6630 | case Intrinsic::amdgcn_ldexp: |
6631 | return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, |
6632 | Op.getOperand(1), Op.getOperand(2)); |
6633 | |
6634 | case Intrinsic::amdgcn_fract: |
6635 | return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); |
6636 | |
6637 | case Intrinsic::amdgcn_class: |
6638 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, |
6639 | Op.getOperand(1), Op.getOperand(2)); |
6640 | case Intrinsic::amdgcn_div_fmas: |
6641 | return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, |
6642 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
6643 | Op.getOperand(4)); |
6644 | |
6645 | case Intrinsic::amdgcn_div_fixup: |
6646 | return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, |
6647 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
6648 | |
6649 | case Intrinsic::amdgcn_div_scale: { |
6650 | const ConstantSDNode *Param = cast<ConstantSDNode>(Op.getOperand(3)); |
6651 | |
6652 | // Translate to the operands expected by the machine instruction; |
6653 | // its first source operand must match one of the other two. |
6654 | SDValue Numerator = Op.getOperand(1); |
6655 | SDValue Denominator = Op.getOperand(2); |
6656 | |
6657 | |
6658 | // Note the order is the opposite of the machine instruction, whose |
6659 | // operands are s1.f = denominator and s2.f = numerator; the |
6660 | // intrinsic takes the numerator first to match a normal division. |
6661 | // Param (all-ones vs. zero) selects which input is scaled/returned. |
6662 | SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator; |
6663 | |
6664 | return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, |
6665 | Denominator, Numerator); |
6666 | } |
6667 | case Intrinsic::amdgcn_icmp: { |
6668 | // There is a Pat that handles this variant, so return it as-is. |
6669 | if (Op.getOperand(1).getValueType() == MVT::i1 && |
6670 | Op.getConstantOperandVal(2) == 0 && |
6671 | Op.getConstantOperandVal(3) == ICmpInst::Predicate::ICMP_NE) |
6672 | return Op; |
6673 | return lowerICMPIntrinsic(*this, Op.getNode(), DAG); |
6674 | } |
6675 | case Intrinsic::amdgcn_fcmp: { |
6676 | return lowerFCMPIntrinsic(*this, Op.getNode(), DAG); |
6677 | } |
6678 | case Intrinsic::amdgcn_ballot: |
6679 | return lowerBALLOTIntrinsic(*this, Op.getNode(), DAG); |
6680 | case Intrinsic::amdgcn_fmed3: |
6681 | return DAG.getNode(AMDGPUISD::FMED3, DL, VT, |
6682 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
6683 | case Intrinsic::amdgcn_fdot2: |
6684 | return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, |
6685 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), |
6686 | Op.getOperand(4)); |
6687 | case Intrinsic::amdgcn_fmul_legacy: |
6688 | return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, |
6689 | Op.getOperand(1), Op.getOperand(2)); |
6690 | case Intrinsic::amdgcn_sffbh: |
6691 | return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); |
6692 | case Intrinsic::amdgcn_sbfe: |
6693 | return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, |
6694 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
6695 | case Intrinsic::amdgcn_ubfe: |
6696 | return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, |
6697 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
6698 | case Intrinsic::amdgcn_cvt_pkrtz: |
6699 | case Intrinsic::amdgcn_cvt_pknorm_i16: |
6700 | case Intrinsic::amdgcn_cvt_pknorm_u16: |
6701 | case Intrinsic::amdgcn_cvt_pk_i16: |
6702 | case Intrinsic::amdgcn_cvt_pk_u16: { |
6703 | // Pick the packing node; bitcast below if the packed type is illegal. |
6704 | EVT VT = Op.getValueType(); |
6705 | unsigned Opcode; |
6706 | |
6707 | if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) |
6708 | Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; |
6709 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) |
6710 | Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; |
6711 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) |
6712 | Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; |
6713 | else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) |
6714 | Opcode = AMDGPUISD::CVT_PK_I16_I32; |
6715 | else |
6716 | Opcode = AMDGPUISD::CVT_PK_U16_U32; |
6717 | |
6718 | if (isTypeLegal(VT)) |
6719 | return DAG.getNode(Opcode, DL, VT, Op.getOperand(1), Op.getOperand(2)); |
6720 | |
6721 | SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, |
6722 | Op.getOperand(1), Op.getOperand(2)); |
6723 | return DAG.getNode(ISD::BITCAST, DL, VT, Node); |
6724 | } |
6725 | case Intrinsic::amdgcn_fmad_ftz: |
6726 | return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), |
6727 | Op.getOperand(2), Op.getOperand(3)); |
6728 | |
6729 | case Intrinsic::amdgcn_if_break: |
6730 | return SDValue(DAG.getMachineNode(AMDGPU::SI_IF_BREAK, DL, VT, |
6731 | Op->getOperand(1), Op->getOperand(2)), 0); |
6732 | |
6733 | case Intrinsic::amdgcn_groupstaticsize: { |
6734 | Triple::OSType OS = getTargetMachine().getTargetTriple().getOS(); |
6735 | if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) |
6736 | return Op; |
6737 | |
6738 | const Module *M = MF.getFunction().getParent(); |
6739 | const GlobalValue *GV = |
6740 | M->getNamedValue(Intrinsic::getName(Intrinsic::amdgcn_groupstaticsize)); |
6741 | SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0, |
6742 | SIInstrInfo::MO_ABS32_LO); |
6743 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; |
6744 | } |
6745 | case Intrinsic::amdgcn_is_shared: |
6746 | case Intrinsic::amdgcn_is_private: { |
6747 | SDLoc SL(Op); |
6748 | unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ? |
6749 | AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS; |
6750 | SDValue Aperture = getSegmentAperture(AS, SL, DAG); |
6751 | SDValue SrcVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, |
6752 | Op.getOperand(1)); |
6753 | |
6754 | SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec, |
6755 | DAG.getConstant(1, SL, MVT::i32)); |
6756 | return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ); |
6757 | } |
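     | |
     | // In other words (sketch): a flat pointer is "shared"/"private" iff |
     | // its high dword matches the segment aperture base, roughly |
     | //   is_shared(p) == (unsigned(p >> 32) == LocalApertureHi), |
     | // where LocalApertureHi is a hypothetical name for the value |
     | // returned by getSegmentAperture above. |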
6758 | case Intrinsic::amdgcn_alignbit: |
6759 | return DAG.getNode(ISD::FSHR, DL, VT, |
6760 | Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); |
6761 | case Intrinsic::amdgcn_perm: |
6762 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, Op.getOperand(1), |
6763 | Op.getOperand(2), Op.getOperand(3)); |
6764 | case Intrinsic::amdgcn_reloc_constant: { |
6765 | Module *M = const_cast<Module *>(MF.getFunction().getParent()); |
6766 | const MDNode *Metadata = cast<MDNodeSDNode>(Op.getOperand(1))->getMD(); |
6767 | auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); |
6768 | auto RelocSymbol = cast<GlobalVariable>( |
6769 | M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); |
6770 | SDValue GA = DAG.getTargetGlobalAddress(RelocSymbol, DL, MVT::i32, 0, |
6771 | SIInstrInfo::MO_ABS32_LO); |
6772 | return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0}; |
6773 | } |
6774 | default: |
6775 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
6776 | AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) |
6777 | return lowerImage(Op, ImageDimIntr, DAG, false); |
6778 | |
6779 | return Op; |
6780 | } |
6781 | } |
6782 | |
6783 | // Fold known-constant buffer offsets into the MMO, or invalidate it. |
6784 | static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset, |
6785 | SDValue SOffset, SDValue Offset, |
6786 | SDValue VIndex = SDValue()) { |
6787 | if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) || |
6788 | !isa<ConstantSDNode>(Offset)) { |
6789 | // The combined offset is not known at compile time, so the MMO |
6790 | // can no longer describe the access; drop its base value. |
6791 | MMO->setValue((Value *)nullptr); |
6792 | return; |
6793 | } |
6794 | |
6795 | if (VIndex && (!isa<ConstantSDNode>(VIndex) || |
6796 | !cast<ConstantSDNode>(VIndex)->isNullValue())) { |
6797 | // A non-zero or unknown index selects a different structure |
6798 | // element, so the folded offset would be wrong; drop the value. |
6799 | MMO->setValue((Value *)nullptr); |
6800 | return; |
6801 | } |
6802 | |
6803 | MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() + |
6804 | cast<ConstantSDNode>(SOffset)->getSExtValue() + |
6805 | cast<ConstantSDNode>(Offset)->getSExtValue()); |
6806 | } |
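     | |
     | // Example (hypothetical constants): voffset = 8, soffset = 0 and an |
     | // immediate offset of 4 fold to an MMO offset of 12; if any operand |
     | // is non-constant, or vindex is present and non-zero, the MMO value |
     | // is cleared instead because the accessed address is unknown. |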
6807 | |
6808 | SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op, |
6809 | SelectionDAG &DAG, |
6810 | unsigned NewOpcode) const { |
6811 | SDLoc DL(Op); |
6812 | |
6813 | SDValue VData = Op.getOperand(2); |
6814 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
6815 | SDValue Ops[] = { |
6816 | Op.getOperand(0), |
6817 | VData, |
6818 | Op.getOperand(3), |
6819 | DAG.getConstant(0, DL, MVT::i32), |
6820 | Offsets.first, |
6821 | Op.getOperand(5), |
6822 | Offsets.second, |
6823 | Op.getOperand(6), |
6824 | DAG.getTargetConstant(0, DL, MVT::i1), |
6825 | }; |
6826 | |
6827 | auto *M = cast<MemSDNode>(Op); |
6828 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]); |
6829 | |
6830 | EVT MemVT = VData.getValueType(); |
6831 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, |
6832 | M->getMemOperand()); |
6833 | } |
6834 | |
6835 | // Compute the idxen operand from the vindex operand. |
6836 | static unsigned getIdxEn(SDValue VIndex) { |
6837 | if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex)) |
6838 | // No need to set idxen if vindex is known to be zero. |
6839 | return VIndexC->getZExtValue() != 0; |
6840 | return 1; |
6841 | } |
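     | |
     | // E.g. a constant vindex of 0 yields idxen = 0, any other constant |
     | // yields idxen = 1, and a non-constant vindex must conservatively |
     | // keep idxen = 1. |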
6842 | |
6843 | SDValue |
6844 | SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG, |
6845 | unsigned NewOpcode) const { |
6846 | SDLoc DL(Op); |
6847 | |
6848 | SDValue VData = Op.getOperand(2); |
6849 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
6850 | SDValue Ops[] = { |
6851 | Op.getOperand(0), |
6852 | VData, |
6853 | Op.getOperand(3), |
6854 | Op.getOperand(4), |
6855 | Offsets.first, |
6856 | Op.getOperand(6), |
6857 | Offsets.second, |
6858 | Op.getOperand(7), |
6859 | DAG.getTargetConstant(1, DL, MVT::i1), |
6860 | }; |
6861 | |
6862 | auto *M = cast<MemSDNode>(Op); |
6863 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); |
6864 | |
6865 | EVT MemVT = VData.getValueType(); |
6866 | return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT, |
6867 | M->getMemOperand()); |
6868 | } |
6869 | |
6870 | SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, |
6871 | SelectionDAG &DAG) const { |
6872 | unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); |
6873 | SDLoc DL(Op); |
6874 | |
6875 | switch (IntrID) { |
6876 | case Intrinsic::amdgcn_ds_ordered_add: |
6877 | case Intrinsic::amdgcn_ds_ordered_swap: { |
6878 | MemSDNode *M = cast<MemSDNode>(Op); |
6879 | SDValue Chain = M->getOperand(0); |
6880 | SDValue M0 = M->getOperand(2); |
6881 | SDValue Value = M->getOperand(3); |
6882 | unsigned IndexOperand = M->getConstantOperandVal(7); |
6883 | unsigned WaveRelease = M->getConstantOperandVal(8); |
6884 | unsigned WaveDone = M->getConstantOperandVal(9); |
6885 | |
6886 | unsigned OrderedCountIndex = IndexOperand & 0x3f; |
6887 | IndexOperand &= ~0x3f; |
6888 | unsigned CountDw = 0; |
6889 | |
6890 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) { |
6891 | CountDw = (IndexOperand >> 24) & 0xf; |
6892 | IndexOperand &= ~(0xf << 24); |
6893 | |
6894 | if (CountDw < 1 || CountDw > 4) { |
6895 | report_fatal_error( |
6896 | "ds_ordered_count: dword count must be between 1 and 4"); |
6897 | } |
6898 | } |
6899 | |
6900 | if (IndexOperand) |
6901 | report_fatal_error("ds_ordered_count: bad index operand"); |
6902 | |
6903 | if (WaveDone && !WaveRelease) |
6904 | report_fatal_error("ds_ordered_count: wave_done requires wave_release"); |
6905 | |
6906 | unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; |
6907 | unsigned ShaderType = |
6908 | SIInstrInfo::getDSShaderTypeValue(DAG.getMachineFunction()); |
6909 | unsigned Offset0 = OrderedCountIndex << 2; |
6910 | unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | |
6911 | (Instruction << 4); |
6912 | |
6913 | if (Subtarget->getGeneration() >= AMDGPUSubtarget::GFX10) |
6914 | Offset1 |= (CountDw - 1) << 6; |
6915 | |
6916 | unsigned Offset = Offset0 | (Offset1 << 8); |
6917 | |
6918 | SDValue Ops[] = { |
6919 | Chain, |
6920 | Value, |
6921 | DAG.getTargetConstant(Offset, DL, MVT::i16), |
6922 | copyToM0(DAG, Chain, DL, M0).getValue(1), |
6923 | }; |
6924 | return DAG.getMemIntrinsicNode(AMDGPUISD::DS_ORDERED_COUNT, DL, |
6925 | M->getVTList(), Ops, M->getMemoryVT(), |
6926 | M->getMemOperand()); |
6927 | } |
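     | |
     | // Resulting bit layout of the ds_ordered_count offset word (from |
     | // the shifts above): bits [7:2] = ordered-count index * 4, bit 8 = |
     | // wave_release, bit 9 = wave_done, bits [11:10] = shader type, |
     | // bit 12 = instruction (0 = add, 1 = swap), and on GFX10+ |
     | // bits [15:14] = dword count - 1. |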
6928 | case Intrinsic::amdgcn_ds_fadd: { |
6929 | MemSDNode *M = cast<MemSDNode>(Op); |
6930 | unsigned Opc; |
6931 | switch (IntrID) { |
6932 | case Intrinsic::amdgcn_ds_fadd: |
6933 | Opc = ISD::ATOMIC_LOAD_FADD; |
6934 | break; |
6935 | } |
6936 | |
6937 | return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(), |
6938 | M->getOperand(0), M->getOperand(2), M->getOperand(3), |
6939 | M->getMemOperand()); |
6940 | } |
6941 | case Intrinsic::amdgcn_atomic_inc: |
6942 | case Intrinsic::amdgcn_atomic_dec: |
6943 | case Intrinsic::amdgcn_ds_fmin: |
6944 | case Intrinsic::amdgcn_ds_fmax: { |
6945 | MemSDNode *M = cast<MemSDNode>(Op); |
6946 | unsigned Opc; |
6947 | switch (IntrID) { |
6948 | case Intrinsic::amdgcn_atomic_inc: |
6949 | Opc = AMDGPUISD::ATOMIC_INC; |
6950 | break; |
6951 | case Intrinsic::amdgcn_atomic_dec: |
6952 | Opc = AMDGPUISD::ATOMIC_DEC; |
6953 | break; |
6954 | case Intrinsic::amdgcn_ds_fmin: |
6955 | Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; |
6956 | break; |
6957 | case Intrinsic::amdgcn_ds_fmax: |
6958 | Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; |
6959 | break; |
6960 | default: |
6961 | llvm_unreachable("Unknown intrinsic!"); |
6962 | } |
6963 | SDValue Ops[] = { |
6964 | M->getOperand(0), |
6965 | M->getOperand(2), |
6966 | M->getOperand(3) |
6967 | }; |
6968 | |
6969 | return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, |
6970 | M->getMemoryVT(), M->getMemOperand()); |
6971 | } |
6972 | case Intrinsic::amdgcn_buffer_load: |
6973 | case Intrinsic::amdgcn_buffer_load_format: { |
6974 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); |
6975 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
6976 | unsigned IdxEn = getIdxEn(Op.getOperand(3)); |
6977 | SDValue Ops[] = { |
6978 | Op.getOperand(0), |
6979 | Op.getOperand(2), |
6980 | Op.getOperand(3), |
6981 | SDValue(), |
6982 | SDValue(), |
6983 | SDValue(), |
6984 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), |
6985 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), |
6986 | }; |
6987 | setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]); |
6988 | |
6989 | unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? |
6990 | AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; |
6991 | |
6992 | EVT VT = Op.getValueType(); |
6993 | EVT IntVT = VT.changeTypeToInteger(); |
6994 | auto *M = cast<MemSDNode>(Op); |
6995 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); |
6996 | EVT LoadVT = Op.getValueType(); |
6997 | |
6998 | if (LoadVT.getScalarType() == MVT::f16) |
6999 | return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, |
7000 | M, DAG, Ops); |
7001 | |
7002 | // Handle BUFFER_LOAD_BYTE/UBYTE/SHORT/USHORT overloaded intrinsics. |
7003 | if (LoadVT.getScalarType() == MVT::i8 || |
7004 | LoadVT.getScalarType() == MVT::i16) |
7005 | return handleByteShortBufferLoads(DAG, LoadVT, DL, Ops, M); |
7006 | |
7007 | return getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, |
7008 | M->getMemOperand(), DAG); |
7009 | } |
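     | |
     | // E.g. (assumed inputs) glc = 1 and slc = 1 pack the cachepolicy |
     | // operand as 1 | (1 << 1) = 3, and a constant vindex of 0 drops |
     | // idxen so the index register can be omitted. |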
7010 | case Intrinsic::amdgcn_raw_buffer_load: |
7011 | case Intrinsic::amdgcn_raw_buffer_load_format: { |
7012 | const bool IsFormat = IntrID == Intrinsic::amdgcn_raw_buffer_load_format; |
7013 | |
7014 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
7015 | SDValue Ops[] = { |
7016 | Op.getOperand(0), |
7017 | Op.getOperand(2), |
7018 | DAG.getConstant(0, DL, MVT::i32), |
7019 | Offsets.first, |
7020 | Op.getOperand(4), |
7021 | Offsets.second, |
7022 | Op.getOperand(5), |
7023 | DAG.getTargetConstant(0, DL, MVT::i1), |
7024 | }; |
7025 | |
7026 | auto *M = cast<MemSDNode>(Op); |
7027 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]); |
7028 | return lowerIntrinsicLoad(M, IsFormat, DAG, Ops); |
7029 | } |
7030 | case Intrinsic::amdgcn_struct_buffer_load: |
7031 | case Intrinsic::amdgcn_struct_buffer_load_format: { |
7032 | const bool IsFormat = IntrID == Intrinsic::amdgcn_struct_buffer_load_format; |
7033 | |
7034 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
7035 | SDValue Ops[] = { |
7036 | Op.getOperand(0), |
7037 | Op.getOperand(2), |
7038 | Op.getOperand(3), |
7039 | Offsets.first, |
7040 | Op.getOperand(5), |
7041 | Offsets.second, |
7042 | Op.getOperand(6), |
7043 | DAG.getTargetConstant(1, DL, MVT::i1), |
7044 | }; |
7045 | |
7046 | auto *M = cast<MemSDNode>(Op); |
7047 | updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]); |
7048 | return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops); |
7049 | } |
7050 | case Intrinsic::amdgcn_tbuffer_load: { |
7051 | MemSDNode *M = cast<MemSDNode>(Op); |
7052 | EVT LoadVT = Op.getValueType(); |
7053 | |
7054 | unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
7055 | unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue(); |
7056 | unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue(); |
7057 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue(); |
7058 | unsigned IdxEn = getIdxEn(Op.getOperand(3)); |
7059 | SDValue Ops[] = { |
7060 | Op.getOperand(0), |
7061 | Op.getOperand(2), |
7062 | Op.getOperand(3), |
7063 | Op.getOperand(4), |
7064 | Op.getOperand(5), |
7065 | Op.getOperand(6), |
7066 | DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), |
7067 | DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), |
7068 | DAG.getTargetConstant(IdxEn, DL, MVT::i1) |
7069 | }; |
7070 | |
7071 | if (LoadVT.getScalarType() == MVT::f16) |
7072 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
7073 | M, DAG, Ops); |
7074 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
7075 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
7076 | DAG); |
7077 | } |
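     | |
     | // The format operand above merges dfmt and nfmt as |
     | // dfmt | (nfmt << 4); e.g. (hypothetical values) dfmt = 14 with |
     | // nfmt = 7 packs to 0x7e. |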
7078 | case Intrinsic::amdgcn_raw_tbuffer_load: { |
7079 | MemSDNode *M = cast<MemSDNode>(Op); |
7080 | EVT LoadVT = Op.getValueType(); |
7081 | auto Offsets = splitBufferOffsets(Op.getOperand(3), DAG); |
7082 | |
7083 | SDValue Ops[] = { |
7084 | Op.getOperand(0), |
7085 | Op.getOperand(2), |
7086 | DAG.getConstant(0, DL, MVT::i32), |
7087 | Offsets.first, |
7088 | Op.getOperand(4), |
7089 | Offsets.second, |
7090 | Op.getOperand(5), |
7091 | Op.getOperand(6), |
7092 | DAG.getTargetConstant(0, DL, MVT::i1), |
7093 | }; |
7094 | |
7095 | if (LoadVT.getScalarType() == MVT::f16) |
7096 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
7097 | M, DAG, Ops); |
7098 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
7099 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
7100 | DAG); |
7101 | } |
7102 | case Intrinsic::amdgcn_struct_tbuffer_load: { |
7103 | MemSDNode *M = cast<MemSDNode>(Op); |
7104 | EVT LoadVT = Op.getValueType(); |
7105 | auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG); |
7106 | |
7107 | SDValue Ops[] = { |
7108 | Op.getOperand(0), |
7109 | Op.getOperand(2), |
7110 | Op.getOperand(3), |
7111 | Offsets.first, |
7112 | Op.getOperand(5), |
7113 | Offsets.second, |
7114 | Op.getOperand(6), |
7115 | Op.getOperand(7), |
7116 | DAG.getTargetConstant(1, DL, MVT::i1), |
7117 | }; |
7118 | |
7119 | if (LoadVT.getScalarType() == MVT::f16) |
7120 | return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, |
7121 | M, DAG, Ops); |
7122 | return getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, |
7123 | Op->getVTList(), Ops, LoadVT, M->getMemOperand(), |
7124 | DAG); |
7125 | } |
7126 | case Intrinsic::amdgcn_buffer_atomic_swap: |
7127 | case Intrinsic::amdgcn_buffer_atomic_add: |
7128 | case Intrinsic::amdgcn_buffer_atomic_sub: |
7129 | case Intrinsic::amdgcn_buffer_atomic_csub: |
7130 | case Intrinsic::amdgcn_buffer_atomic_smin: |
7131 | case Intrinsic::amdgcn_buffer_atomic_umin: |
7132 | case Intrinsic::amdgcn_buffer_atomic_smax: |
7133 | case Intrinsic::amdgcn_buffer_atomic_umax: |
7134 | case Intrinsic::amdgcn_buffer_atomic_and: |
7135 | case Intrinsic::amdgcn_buffer_atomic_or: |
7136 | case Intrinsic::amdgcn_buffer_atomic_xor: |
7137 | case Intrinsic::amdgcn_buffer_atomic_fadd: { |
7138 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue(); |
7139 | unsigned IdxEn = getIdxEn(Op.getOperand(4)); |
7140 | SDValue Ops[] = { |
7141 | Op.getOperand(0), |
7142 | Op.getOperand(2), |
7143 | Op.getOperand(3), |
7144 | Op.getOperand(4), |
7145 | SDValue(), |
7146 | SDValue(), |
7147 | SDValue(), |
7148 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), |
7149 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), |
7150 | }; |
7151 | setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]); |
7152 | |
7153 | EVT VT = Op.getValueType(); |
7154 | |
7155 | auto *M = cast<MemSDNode>(Op); |
7156 | updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]); |
7157 | unsigned Opcode = 0; |
7158 | |
7159 | switch (IntrID) { |
7160 | case Intrinsic::amdgcn_buffer_atomic_swap: |
7161 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; |
7162 | break; |
7163 | case Intrinsic::amdgcn_buffer_atomic_add: |
7164 | Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; |
7165 | break; |
7166 | case Intrinsic::amdgcn_buffer_atomic_sub: |
7167 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; |
7168 | break; |
7169 | case Intrinsic::amdgcn_buffer_atomic_csub: |
7170 | Opcode = AMDGPUISD::BUFFER_ATOMIC_CSUB; |
7171 | break; |
7172 | case Intrinsic::amdgcn_buffer_atomic_smin: |
7173 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; |
7174 | break; |
7175 | case Intrinsic::amdgcn_buffer_atomic_umin: |
7176 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; |
7177 | break; |
7178 | case Intrinsic::amdgcn_buffer_atomic_smax: |
7179 | Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; |
7180 | break; |
7181 | case Intrinsic::amdgcn_buffer_atomic_umax: |
7182 | Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; |
7183 | break; |
7184 | case Intrinsic::amdgcn_buffer_atomic_and: |
7185 | Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; |
7186 | break; |
7187 | case Intrinsic::amdgcn_buffer_atomic_or: |
7188 | Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; |
7189 | break; |
7190 | case Intrinsic::amdgcn_buffer_atomic_xor: |
7191 | Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; |
7192 | break; |
7193 | case Intrinsic::amdgcn_buffer_atomic_fadd: |
7194 | if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { |
7195 | DiagnosticInfoUnsupported |
7196 | NoFpRet(DAG.getMachineFunction().getFunction(), |
7197 | "return versions of fp atomics not supported", |
7198 | DL.getDebugLoc(), DS_Error); |
7199 | DAG.getContext()->diagnose(NoFpRet); |
7200 | return SDValue(); |
7201 | } |
7202 | Opcode = AMDGPUISD::BUFFER_ATOMIC_FADD; |
7203 | break; |
7204 | default: |
7205 | llvm_unreachable("unhandled atomic opcode"); |
7206 | } |
7207 | |
7208 | return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, |
7209 | M->getMemOperand()); |
7210 | } |
7211 | case Intrinsic::amdgcn_raw_buffer_atomic_fadd: |
7212 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); |
7213 | case Intrinsic::amdgcn_struct_buffer_atomic_fadd: |
7214 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FADD); |
7215 | case Intrinsic::amdgcn_raw_buffer_atomic_fmin: |
7216 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); |
7217 | case Intrinsic::amdgcn_struct_buffer_atomic_fmin: |
7218 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMIN); |
7219 | case Intrinsic::amdgcn_raw_buffer_atomic_fmax: |
7220 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); |
7221 | case Intrinsic::amdgcn_struct_buffer_atomic_fmax: |
7222 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_FMAX); |
7223 | case Intrinsic::amdgcn_raw_buffer_atomic_swap: |
7224 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SWAP); |
7225 | case Intrinsic::amdgcn_raw_buffer_atomic_add: |
7226 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); |
7227 | case Intrinsic::amdgcn_raw_buffer_atomic_sub: |
7228 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); |
7229 | case Intrinsic::amdgcn_raw_buffer_atomic_smin: |
7230 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMIN); |
7231 | case Intrinsic::amdgcn_raw_buffer_atomic_umin: |
7232 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMIN); |
7233 | case Intrinsic::amdgcn_raw_buffer_atomic_smax: |
7234 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SMAX); |
7235 | case Intrinsic::amdgcn_raw_buffer_atomic_umax: |
7236 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_UMAX); |
7237 | case Intrinsic::amdgcn_raw_buffer_atomic_and: |
7238 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); |
7239 | case Intrinsic::amdgcn_raw_buffer_atomic_or: |
7240 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); |
7241 | case Intrinsic::amdgcn_raw_buffer_atomic_xor: |
7242 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); |
7243 | case Intrinsic::amdgcn_raw_buffer_atomic_inc: |
7244 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); |
7245 | case Intrinsic::amdgcn_raw_buffer_atomic_dec: |
7246 | return lowerRawBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); |
7247 | case Intrinsic::amdgcn_struct_buffer_atomic_swap: |
7248 | return lowerStructBufferAtomicIntrin(Op, DAG, |
7249 | AMDGPUISD::BUFFER_ATOMIC_SWAP); |
7250 | case Intrinsic::amdgcn_struct_buffer_atomic_add: |
7251 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_ADD); |
7252 | case Intrinsic::amdgcn_struct_buffer_atomic_sub: |
7253 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_SUB); |
7254 | case Intrinsic::amdgcn_struct_buffer_atomic_smin: |
7255 | return lowerStructBufferAtomicIntrin(Op, DAG, |
7256 | AMDGPUISD::BUFFER_ATOMIC_SMIN); |
7257 | case Intrinsic::amdgcn_struct_buffer_atomic_umin: |
7258 | return lowerStructBufferAtomicIntrin(Op, DAG, |
7259 | AMDGPUISD::BUFFER_ATOMIC_UMIN); |
7260 | case Intrinsic::amdgcn_struct_buffer_atomic_smax: |
7261 | return lowerStructBufferAtomicIntrin(Op, DAG, |
7262 | AMDGPUISD::BUFFER_ATOMIC_SMAX); |
7263 | case Intrinsic::amdgcn_struct_buffer_atomic_umax: |
7264 | return lowerStructBufferAtomicIntrin(Op, DAG, |
7265 | AMDGPUISD::BUFFER_ATOMIC_UMAX); |
7266 | case Intrinsic::amdgcn_struct_buffer_atomic_and: |
7267 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_AND); |
7268 | case Intrinsic::amdgcn_struct_buffer_atomic_or: |
7269 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_OR); |
7270 | case Intrinsic::amdgcn_struct_buffer_atomic_xor: |
7271 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_XOR); |
7272 | case Intrinsic::amdgcn_struct_buffer_atomic_inc: |
7273 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_INC); |
7274 | case Intrinsic::amdgcn_struct_buffer_atomic_dec: |
7275 | return lowerStructBufferAtomicIntrin(Op, DAG, AMDGPUISD::BUFFER_ATOMIC_DEC); |
7276 | |
7277 | case Intrinsic::amdgcn_buffer_atomic_cmpswap: { |
7278 | unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue(); |
7279 | unsigned IdxEn = getIdxEn(Op.getOperand(5)); |
7280 | SDValue Ops[] = { |
7281 | Op.getOperand(0), |
7282 | Op.getOperand(2), |
7283 | Op.getOperand(3), |
7284 | Op.getOperand(4), |
7285 | Op.getOperand(5), |
7286 | SDValue(), |
7287 | SDValue(), |
7288 | SDValue(), |
7289 | DAG.getTargetConstant(Slc << 1, DL, MVT::i32), |
7290 | DAG.getTargetConstant(IdxEn, DL, MVT::i1), |
7291 | }; |
7292 | setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]); |
7293 | |
7294 | EVT VT = Op.getValueType(); |
7295 | auto *M = cast<MemSDNode>(Op); |
7296 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); |
7297 | |
7298 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
7299 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
7300 | } |
7301 | case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap: { |
7302 | auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG); |
7303 | SDValue Ops[] = { |
7304 | Op.getOperand(0), |
7305 | Op.getOperand(2), |
7306 | Op.getOperand(3), |
7307 | Op.getOperand(4), |
7308 | DAG.getConstant(0, DL, MVT::i32), |
7309 | Offsets.first, |
7310 | Op.getOperand(6), |
7311 | Offsets.second, |
7312 | Op.getOperand(7), |
7313 | DAG.getTargetConstant(0, DL, MVT::i1), |
7314 | }; |
7315 | EVT VT = Op.getValueType(); |
7316 | auto *M = cast<MemSDNode>(Op); |
7317 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]); |
7318 | |
7319 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
7320 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
7321 | } |
7322 | case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap: { |
7323 | auto Offsets = splitBufferOffsets(Op.getOperand(6), DAG); |
7324 | SDValue Ops[] = { |
7325 | Op.getOperand(0), |
7326 | Op.getOperand(2), |
7327 | Op.getOperand(3), |
7328 | Op.getOperand(4), |
7329 | Op.getOperand(5), |
7330 | Offsets.first, |
7331 | Op.getOperand(7), |
7332 | Offsets.second, |
7333 | Op.getOperand(8), |
7334 | DAG.getTargetConstant(1, DL, MVT::i1), |
7335 | }; |
7336 | EVT VT = Op.getValueType(); |
7337 | auto *M = cast<MemSDNode>(Op); |
7338 | updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]); |
7339 | |
7340 | return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, |
7341 | Op->getVTList(), Ops, VT, M->getMemOperand()); |
7342 | } |
7343 | case Intrinsic::amdgcn_image_bvh_intersect_ray: { |
7344 | SDLoc DL(Op); |
7345 | MemSDNode *M = cast<MemSDNode>(Op); |
7346 | SDValue NodePtr = M->getOperand(2); |
7347 | SDValue RayExtent = M->getOperand(3); |
7348 | SDValue RayOrigin = M->getOperand(4); |
7349 | SDValue RayDir = M->getOperand(5); |
7350 | SDValue RayInvDir = M->getOperand(6); |
7351 | SDValue TDescr = M->getOperand(7); |
7352 | |
7353 | assert(NodePtr.getValueType() == MVT::i32 || |
7354 | NodePtr.getValueType() == MVT::i64); |
7355 | assert(RayDir.getValueType() == MVT::v4f16 || |
7356 | RayDir.getValueType() == MVT::v4f32); |
7357 | |
7358 | if (!Subtarget->hasGFX10_AEncoding()) { |
7359 | emitRemovedIntrinsicError(DAG, DL, Op.getValueType()); |
7360 | return SDValue(); |
7361 | } |
7362 | |
7363 | bool IsA16 = RayDir.getValueType().getVectorElementType() == MVT::f16; |
7364 | bool Is64 = NodePtr.getValueType() == MVT::i64; |
7365 | unsigned Opcode = IsA16 ? Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_a16_nsa |
7366 | : AMDGPU::IMAGE_BVH_INTERSECT_RAY_a16_nsa |
7367 | : Is64 ? AMDGPU::IMAGE_BVH64_INTERSECT_RAY_nsa |
7368 | : AMDGPU::IMAGE_BVH_INTERSECT_RAY_nsa; |
7369 | |
7370 | SmallVector<SDValue, 16> Ops; |
7371 | |
7372 | auto packLanes = [&DAG, &Ops, &DL] (SDValue Op, bool IsAligned) { |
7373 | SmallVector<SDValue, 3> Lanes; |
7374 | DAG.ExtractVectorElements(Op, Lanes, 0, 3); |
7375 | if (Lanes[0].getValueSizeInBits() == 32) { |
7376 | for (unsigned I = 0; I < 3; ++I) |
7377 | Ops.push_back(DAG.getBitcast(MVT::i32, Lanes[I])); |
7378 | } else { |
7379 | if (IsAligned) { |
7380 | Ops.push_back( |
7381 | DAG.getBitcast(MVT::i32, |
7382 | DAG.getBuildVector(MVT::v2f16, DL, |
7383 | { Lanes[0], Lanes[1] }))); |
7384 | Ops.push_back(Lanes[2]); |
7385 | } else { |
7386 | SDValue Elt0 = Ops.pop_back_val(); |
7387 | Ops.push_back( |
7388 | DAG.getBitcast(MVT::i32, |
7389 | DAG.getBuildVector(MVT::v2f16, DL, |
7390 | { Elt0, Lanes[0] }))); |
7391 | Ops.push_back( |
7392 | DAG.getBitcast(MVT::i32, |
7393 | DAG.getBuildVector(MVT::v2f16, DL, |
7394 | { Lanes[1], Lanes[2] }))); |
7395 | } |
7396 | } |
7397 | }; |
7398 | |
7399 | if (Is64) |
7400 | DAG.ExtractVectorElements(DAG.getBitcast(MVT::v2i32, NodePtr), Ops, 0, 2); |
7401 | else |
7402 | Ops.push_back(NodePtr); |
7403 | |
7404 | Ops.push_back(DAG.getBitcast(MVT::i32, RayExtent)); |
7405 | packLanes(RayOrigin, true); |
7406 | packLanes(RayDir, true); |
7407 | packLanes(RayInvDir, false); |
7408 | Ops.push_back(TDescr); |
7409 | if (IsA16) |
7410 | Ops.push_back(DAG.getTargetConstant(1, DL, MVT::i1)); |
7411 | Ops.push_back(M->getChain()); |
7412 | |
7413 | auto *NewNode = DAG.getMachineNode(Opcode, DL, M->getVTList(), Ops); |
7414 | MachineMemOperand *MemRef = M->getMemOperand(); |
7415 | DAG.setNodeMemRefs(NewNode, {MemRef}); |
7416 | return SDValue(NewNode, 0); |
7417 | } |
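     | |
     | // Lane packing sketch for the a16 form (assumed f16 inputs): the |
     | // ray origin is always three f32 dwords; dir is packed aligned as |
     | // {dx,dy} plus a lone dz, and the unaligned invdir then pops that |
     | // dz to form {dz,ix} and {iy,iz}, sharing a dword across operands. |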
7418 | case Intrinsic::amdgcn_global_atomic_fadd: |
7419 | if (!Op.getValue(0).use_empty() && !Subtarget->hasGFX90AInsts()) { |
7420 | DiagnosticInfoUnsupported |
7421 | NoFpRet(DAG.getMachineFunction().getFunction(), |
7422 | "return versions of fp atomics not supported", |
7423 | DL.getDebugLoc(), DS_Error); |
7424 | DAG.getContext()->diagnose(NoFpRet); |
7425 | return SDValue(); |
7426 | } |
7427 | LLVM_FALLTHROUGH; |
7428 | case Intrinsic::amdgcn_global_atomic_fmin: |
7429 | case Intrinsic::amdgcn_global_atomic_fmax: |
7430 | case Intrinsic::amdgcn_flat_atomic_fadd: |
7431 | case Intrinsic::amdgcn_flat_atomic_fmin: |
7432 | case Intrinsic::amdgcn_flat_atomic_fmax: { |
7433 | MemSDNode *M = cast<MemSDNode>(Op); |
7434 | SDValue Ops[] = { |
7435 | M->getOperand(0), |
7436 | M->getOperand(2), |
7437 | M->getOperand(3) |
7438 | }; |
7439 | unsigned Opcode = 0; |
7440 | switch (IntrID) { |
7441 | case Intrinsic::amdgcn_global_atomic_fadd: |
7442 | case Intrinsic::amdgcn_flat_atomic_fadd: { |
7443 | EVT VT = Op.getOperand(3).getValueType(); |
7444 | return DAG.getAtomic(ISD::ATOMIC_LOAD_FADD, DL, VT, |
7445 | DAG.getVTList(VT, MVT::Other), Ops, |
7446 | M->getMemOperand()); |
7447 | } |
7448 | case Intrinsic::amdgcn_global_atomic_fmin: |
7449 | case Intrinsic::amdgcn_flat_atomic_fmin: { |
7450 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMIN; |
7451 | break; |
7452 | } |
7453 | case Intrinsic::amdgcn_global_atomic_fmax: |
7454 | case Intrinsic::amdgcn_flat_atomic_fmax: { |
7455 | Opcode = AMDGPUISD::ATOMIC_LOAD_FMAX; |
7456 | break; |
7457 | } |
7458 | default: |
7459 | llvm_unreachable("unhandled atomic opcode"); |
7460 | } |
7461 | return DAG.getMemIntrinsicNode(Opcode, SDLoc(Op), |
7462 | M->getVTList(), Ops, M->getMemoryVT(), |
7463 | M->getMemOperand()); |
7464 | } |
7465 | default: |
7466 | |
7467 | if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = |
7468 | AMDGPU::getImageDimIntrinsicInfo(IntrID)) |
7469 | return lowerImage(Op, ImageDimIntr, DAG, true); |
7470 | |
7471 | return SDValue(); |
7472 | } |
7473 | } |
7474 | |
7475 | // Call DAG.getMemIntrinsicNode for a load, widening a dwordx3 type |
7476 | // to dwordx4 first on subtargets without dwordx3 load/store. |
7477 | SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, |
7478 | SDVTList VTList, |
7479 | ArrayRef<SDValue> Ops, EVT MemVT, |
7480 | MachineMemOperand *MMO, |
7481 | SelectionDAG &DAG) const { |
7482 | EVT VT = VTList.VTs[0]; |
7483 | EVT WidenedVT = VT; |
7484 | EVT WidenedMemVT = MemVT; |
7485 | if (!Subtarget->hasDwordx3LoadStores() && |
7486 | (WidenedVT == MVT::v3i32 || WidenedVT == MVT::v3f32)) { |
7487 | WidenedVT = EVT::getVectorVT(*DAG.getContext(), |
7488 | WidenedVT.getVectorElementType(), 4); |
7489 | WidenedMemVT = EVT::getVectorVT(*DAG.getContext(), |
7490 | WidenedMemVT.getVectorElementType(), 4); |
7491 | MMO = DAG.getMachineFunction().getMachineMemOperand(MMO, 0, 16); |
7492 | } |
7493 | |
7494 | assert(VTList.NumVTs == 2); |
7495 | SDVTList WidenedVTList = DAG.getVTList(WidenedVT, VTList.VTs[1]); |
7496 | |
7497 | auto NewOp = DAG.getMemIntrinsicNode(Opcode, DL, WidenedVTList, Ops, |
7498 | WidenedMemVT, MMO); |
7499 | if (WidenedVT != VT) { |
7500 | auto Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, NewOp, |
7501 | DAG.getVectorIdxConstant(0, DL)); |
7502 | NewOp = DAG.getMergeValues({ Extract, SDValue(NewOp.getNode(), 1) }, DL); |
7503 | } |
7504 | return NewOp; |
7505 | } |
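     | |
     | // E.g. (sketch) on a subtarget without dwordx3 load/store, a v3i32 |
     | // intrinsic load is widened to v4i32 with a 16-byte MMO, and the |
     | // original v3 value is recovered via EXTRACT_SUBVECTOR at index 0 |
     | // before the chain is re-merged. |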
7506 | |
7507 | SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG, |
7508 | bool ImageStore) const { |
7509 | EVT StoreVT = VData.getValueType(); |
7510 | |
7511 | // No change for f16 and legal vector D16 types. |
7512 | if (!StoreVT.isVector()) |
7513 | return VData; |
7514 | |
7515 | SDLoc DL(VData); |
7516 | unsigned NumElements = StoreVT.getVectorNumElements(); |
7517 | |
7518 | if (Subtarget->hasUnpackedD16VMem()) { |
7519 | // Unpacked d16: zero-extend each 16-bit element into its own dword. |
7520 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); |
7521 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
7522 | |
7523 | EVT EquivStoreVT = |
7524 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElements); |
7525 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); |
7526 | return DAG.UnrollVectorOp(ZExt.getNode()); |
7527 | } |
7528 | |
7529 | |
7530 | // Some subtargets mis-count register use for packed d16 image |
7531 | // stores; pad the data out to the d32 register count as follows. |
7532 | if (ImageStore && Subtarget->hasImageStoreD16Bug()) { |
7533 | |
7534 | EVT IntStoreVT = StoreVT.changeTypeToInteger(); |
7535 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
7536 | |
7537 | |
7538 | SmallVector<SDValue, 4> Elts; |
7539 | DAG.ExtractVectorElements(IntVData, Elts); |
7540 | |
7541 | |
7542 | SmallVector<SDValue, 4> PackedElts; |
7543 | for (unsigned I = 0; I < Elts.size() / 2; I += 1) { |
7544 | SDValue Pair = |
7545 | DAG.getBuildVector(MVT::v2i16, DL, {Elts[I * 2], Elts[I * 2 + 1]}); |
7546 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); |
7547 | PackedElts.push_back(IntPair); |
7548 | } |
7549 | if ((NumElements % 2) == 1) { |
7550 | |
7551 | unsigned I = Elts.size() / 2; |
7552 | SDValue Pair = DAG.getBuildVector(MVT::v2i16, DL, |
7553 | {Elts[I * 2], DAG.getUNDEF(MVT::i16)}); |
7554 | SDValue IntPair = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Pair); |
7555 | PackedElts.push_back(IntPair); |
7556 | } |
7557 | |
7558 | |
7559 | PackedElts.resize(Elts.size(), DAG.getUNDEF(MVT::i32)); |
7560 | |
7561 | |
7562 | EVT VecVT = |
7563 | EVT::getVectorVT(*DAG.getContext(), MVT::i32, PackedElts.size()); |
7564 | return DAG.getBuildVector(VecVT, DL, PackedElts); |
7565 | } |
7566 | |
7567 | if (NumElements == 3) { |
7568 | EVT IntStoreVT = |
7569 | EVT::getIntegerVT(*DAG.getContext(), StoreVT.getStoreSizeInBits()); |
7570 | SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); |
7571 | |
7572 | EVT WidenedStoreVT = EVT::getVectorVT( |
7573 | *DAG.getContext(), StoreVT.getVectorElementType(), NumElements + 1); |
7574 | EVT WidenedIntVT = EVT::getIntegerVT(*DAG.getContext(), |
7575 | WidenedStoreVT.getStoreSizeInBits()); |
7576 | SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenedIntVT, IntVData); |
7577 | return DAG.getNode(ISD::BITCAST, DL, WidenedStoreVT, ZExt); |
7578 | } |
7579 | |
7580 | assert(isTypeLegal(StoreVT)); |
7581 | return VData; |
7582 | } |
7583 | |
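// Lower intrinsics that produce only a chain (no value results): the various
// buffer/tbuffer store forms plus a few specials such as exp.compr and
// s_barrier.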
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp_compr: {
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    // Hack around illegal type on SI by directly selecting it.
    if (isTypeLegal(Src0.getValueType()))
      return SDValue();

    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Op.getOperand(2), // tgt
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), // src0
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), // src1
      Undef, // src2
      Undef, // src3
      Op.getOperand(7), // vm
      DAG.getTargetConstant(1, DL, MVT::i1), // compr
      Op.getOperand(3), // en
      Op.getOperand(0) // Chain
    };

    unsigned Opc = Done->isNullValue() ? AMDGPU::EXP : AMDGPU::EXP_DONE;
    return SDValue(DAG.getMachineNode(Opc, DL, Op->getVTList(), Ops), 0);
  }
  case Intrinsic::amdgcn_s_barrier: {
    // A barrier is a no-op if the whole workgroup fits in a single wave.
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  }
  case Intrinsic::amdgcn_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    unsigned Dfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
    unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(4));
    SDValue Ops[] = {
      Chain,
      VData,            // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Op.getOperand(5), // voffset
      Op.getOperand(6), // soffset
      Op.getOperand(7), // offset
      DAG.getTargetConstant(Dfmt | (Nfmt << 4), DL, MVT::i32), // format
      DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32),   // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_struct_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Chain,
      VData,            // vdata
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Offsets.first,    // voffset
      Op.getOperand(6), // soffset
      Offsets.second,   // offset
      Op.getOperand(7), // format
      Op.getOperand(8), // cachepolicy, swizzled buffer
      DAG.getTargetConstant(1, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_raw_tbuffer_store: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
      Chain,
      VData,            // vdata
      Op.getOperand(3), // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,    // voffset
      Op.getOperand(5), // soffset
      Offsets.second,   // offset
      Op.getOperand(6), // format
      Op.getOperand(7), // cachepolicy, swizzled buffer
      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
    };
    unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 :
                           AMDGPUISD::TBUFFER_STORE_FORMAT;
    MemSDNode *M = cast<MemSDNode>(Op);
    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_buffer_store:
  case Intrinsic::amdgcn_buffer_store_format: {
    SDValue VData = Op.getOperand(2);
    bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16);
    if (IsD16)
      VData = handleD16VData(VData, DAG);
    unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
    unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
    unsigned IdxEn = getIdxEn(Op.getOperand(4));
    SDValue Ops[] = {
      Chain,
      VData,
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      SDValue(), // voffset -- will be set by setBufferOffsets
      SDValue(), // soffset -- will be set by setBufferOffsets
      SDValue(), // offset -- will be set by setBufferOffsets
      DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
      DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
    };
    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);

    unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
                   AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);

    // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
    EVT VDataType = VData.getValueType().getScalarType();
    if (VDataType == MVT::i8 || VDataType == MVT::i16)
      return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_raw_buffer_store:
  case Intrinsic::amdgcn_raw_buffer_store_format: {
    const bool IsFormat =
        IntrinsicID == Intrinsic::amdgcn_raw_buffer_store_format;

    SDValue VData = Op.getOperand(2);
    EVT VDataVT = VData.getValueType();
    EVT EltType = VDataVT.getScalarType();
    bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);
    if (IsD16) {
      VData = handleD16VData(VData, DAG);
      VDataVT = VData.getValueType();
    }

    if (!isTypeLegal(VDataVT)) {
      VData =
          DAG.getNode(ISD::BITCAST, DL,
                      getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
    }

    auto Offsets = splitBufferOffsets(Op.getOperand(4), DAG);
    SDValue Ops[] = {
      Chain,
      VData,
      Op.getOperand(3), // rsrc
      DAG.getConstant(0, DL, MVT::i32), // vindex
      Offsets.first,    // voffset
      Op.getOperand(5), // soffset
      Offsets.second,   // offset
      Op.getOperand(6), // cachepolicy, swizzled buffer
      DAG.getTargetConstant(0, DL, MVT::i1), // idxen
    };
    unsigned Opc =
        IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);

    // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
    if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
      return handleByteShortBufferStores(DAG, VDataVT, DL, Ops, M);

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }

  case Intrinsic::amdgcn_struct_buffer_store:
  case Intrinsic::amdgcn_struct_buffer_store_format: {
    const bool IsFormat =
        IntrinsicID == Intrinsic::amdgcn_struct_buffer_store_format;

    SDValue VData = Op.getOperand(2);
    EVT VDataVT = VData.getValueType();
    EVT EltType = VDataVT.getScalarType();
    bool IsD16 = IsFormat && (EltType.getSizeInBits() == 16);

    if (IsD16) {
      VData = handleD16VData(VData, DAG);
      VDataVT = VData.getValueType();
    }

    if (!isTypeLegal(VDataVT)) {
      VData =
          DAG.getNode(ISD::BITCAST, DL,
                      getEquivalentMemType(*DAG.getContext(), VDataVT), VData);
    }

    auto Offsets = splitBufferOffsets(Op.getOperand(5), DAG);
    SDValue Ops[] = {
      Chain,
      VData,
      Op.getOperand(3), // rsrc
      Op.getOperand(4), // vindex
      Offsets.first,    // voffset
      Op.getOperand(6), // soffset
      Offsets.second,   // offset
      Op.getOperand(7), // cachepolicy, swizzled buffer
      DAG.getTargetConstant(1, DL, MVT::i1), // idxen
    };
    unsigned Opc = IntrinsicID == Intrinsic::amdgcn_struct_buffer_store ?
                   AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
    Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
    MemSDNode *M = cast<MemSDNode>(Op);
    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);

    // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
    EVT VDataType = VData.getValueType().getScalarType();
    if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
      return handleByteShortBufferStores(DAG, VDataType, DL, Ops, M);

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_end_cf:
    return SDValue(DAG.getMachineNode(AMDGPU::SI_END_CF, DL, MVT::Other,
                                      Op->getOperand(2), Chain), 0);

  default: {
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
      return lowerImage(Op, ImageDimIntr, DAG, true);

    return Op;
  }
  }
}

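// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
// offset (the offset that is included in bounds checking and swizzling, to be
// split between the instruction's voffset and immoffset fields) and soffset
// (the offset that is excluded from bounds checking and swizzling, to go in
// the instruction's soffset field). This function takes the first kind of
// offset and splits it into a voffset and an immediate offset.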
std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
    SDValue Offset, SelectionDAG &DAG) const {
  SDLoc DL(Offset);
  const unsigned MaxImm = 4095;
  SDValue N0 = Offset;
  ConstantSDNode *C1 = nullptr;

  if ((C1 = dyn_cast<ConstantSDNode>(N0)))
    N0 = SDValue();
  else if (DAG.isBaseWithConstantOffset(N0)) {
    C1 = cast<ConstantSDNode>(N0.getOperand(1));
    N0 = N0.getOperand(0);
  }

  if (C1) {
    unsigned ImmOffset = C1->getZExtValue();
    // If the immediate value is too big for the immoffset field, put the value
    // and -4096 into the immoffset field so that the value that is copied/added
    // for the voffset field is a multiple of 4096, and it stands more chance
    // of being CSEd with the copy/add for another similar load/store.
    // However, do not do that rounding down to a multiple of 4096 if that is a
    // negative number, as it appears to be illegal to have a negative offset
    // in the vgpr, even if adding the immediate offset makes it positive.
    unsigned Overflow = ImmOffset & ~MaxImm;
    ImmOffset -= Overflow;
    if ((int32_t)Overflow < 0) {
      Overflow += ImmOffset;
      ImmOffset = 0;
    }
    C1 = cast<ConstantSDNode>(DAG.getTargetConstant(ImmOffset, DL, MVT::i32));
    if (Overflow) {
      auto OverflowVal = DAG.getConstant(Overflow, DL, MVT::i32);
      if (!N0)
        N0 = OverflowVal;
      else {
        SDValue Ops[] = { N0, OverflowVal };
        N0 = DAG.getNode(ISD::ADD, DL, MVT::i32, Ops);
      }
    }
  }
  if (!N0)
    N0 = DAG.getConstant(0, DL, MVT::i32);
  if (!C1)
    C1 = cast<ConstantSDNode>(DAG.getTargetConstant(0, DL, MVT::i32));
  return {N0, SDValue(C1, 0)};
}

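// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
// three offsets (voffset, soffset and immoffset) into the SDValue[3] array
// pointed to by Offsets.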
void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
                                        SelectionDAG &DAG, SDValue *Offsets,
                                        Align Alignment) const {
  SDLoc DL(CombinedOffset);
  if (auto C = dyn_cast<ConstantSDNode>(CombinedOffset)) {
    uint32_t Imm = C->getZExtValue();
    uint32_t SOffset, ImmOffset;
    if (AMDGPU::splitMUBUFOffset(Imm, SOffset, ImmOffset, Subtarget,
                                 Alignment)) {
      Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
      Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
      Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
      return;
    }
  }
  if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
    SDValue N0 = CombinedOffset.getOperand(0);
    SDValue N1 = CombinedOffset.getOperand(1);
    uint32_t SOffset, ImmOffset;
    int Offset = cast<ConstantSDNode>(N1)->getSExtValue();
    if (Offset >= 0 && AMDGPU::splitMUBUFOffset(Offset, SOffset, ImmOffset,
                                                Subtarget, Alignment)) {
      Offsets[0] = N0;
      Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
      Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
      return;
    }
  }
  Offsets[0] = CombinedOffset;
  Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
  Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
}

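// Handle 8 bit and 16 bit buffer loads.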
SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
                                                     EVT LoadVT, SDLoc DL,
                                                     ArrayRef<SDValue> Ops,
                                                     MemSDNode *M) const {
  EVT IntVT = LoadVT.changeTypeToInteger();
  unsigned Opc = (LoadVT.getScalarType() == MVT::i8) ?
                 AMDGPUISD::BUFFER_LOAD_UBYTE : AMDGPUISD::BUFFER_LOAD_USHORT;

  SDVTList ResList = DAG.getVTList(MVT::i32, MVT::Other);
  SDValue BufferLoad = DAG.getMemIntrinsicNode(Opc, DL, ResList,
                                               Ops, IntVT,
                                               M->getMemOperand());
  SDValue LoadVal = DAG.getNode(ISD::TRUNCATE, DL, IntVT, BufferLoad);
  LoadVal = DAG.getNode(ISD::BITCAST, DL, LoadVT, LoadVal);

  return DAG.getMergeValues({LoadVal, BufferLoad.getValue(1)}, DL);
}

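// Handle 8 bit and 16 bit buffer stores.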
SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
                                                      EVT VDataType, SDLoc DL,
                                                      SDValue Ops[],
                                                      MemSDNode *M) const {
  if (VDataType == MVT::f16)
    Ops[1] = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Ops[1]);

  SDValue BufferStoreExt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Ops[1]);
  Ops[1] = BufferStoreExt;
  unsigned Opc = (VDataType == MVT::i8) ? AMDGPUISD::BUFFER_STORE_BYTE :
                                          AMDGPUISD::BUFFER_STORE_SHORT;
  ArrayRef<SDValue> OpsRef = makeArrayRef(&Ops[0], 9);
  return DAG.getMemIntrinsicNode(Opc, DL, M->getVTList(), OpsRef, VDataType,
                                 M->getMemOperand());
}

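// Extend or truncate a loaded i32 value to the requested type according to
// the load's extension kind.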
static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
                                 ISD::LoadExtType ExtType, SDValue Op,
                                 const SDLoc &SL, EVT VT) {
  if (VT.bitsLT(Op.getValueType()))
    return DAG.getNode(ISD::TRUNCATE, SL, VT, Op);

  switch (ExtType) {
  case ISD::SEXTLOAD:
    return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op);
  case ISD::ZEXTLOAD:
    return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op);
  case ISD::EXTLOAD:
    return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op);
  case ISD::NON_EXTLOAD:
    return Op;
  }

  llvm_unreachable("invalid ext type");
}

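// Try to widen a uniform, sub-dword constant load to a full 32-bit load so it
// can be selected as a scalar load.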
SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (Ld->getAlignment() < 4 || Ld->isDivergent())
    return SDValue();

  // FIXME: Constant loads should all be marked invariant.
  unsigned AS = Ld->getAddressSpace();
  if (AS != AMDGPUAS::CONSTANT_ADDRESS &&
      AS != AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
      (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant()))
    return SDValue();

  // Don't do this early, since it may interfere with adjacent load merging for
  // illegal types. We can avoid losing alignment information for exotic types
  // pre-legalize.
  EVT MemVT = Ld->getMemoryVT();
  if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) ||
      MemVT.getSizeInBits() >= 32)
    return SDValue();

  SDLoc SL(Ld);

  assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) &&
         "unexpected vector extload");

  // TODO: Drop only high part of range.
  SDValue Ptr = Ld->getBasePtr();
  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                MVT::i32, SL, Ld->getChain(), Ptr,
                                Ld->getOffset(),
                                Ld->getPointerInfo(), MVT::i32,
                                Ld->getAlignment(),
                                Ld->getMemOperand()->getFlags(),
                                Ld->getAAInfo(),
                                nullptr); // Drop ranges

  EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  if (MemVT.isFloatingPoint()) {
    assert(Ld->getExtensionType() == ISD::NON_EXTLOAD &&
           "unexpected fp extload");
    TruncVT = MemVT.changeTypeToInteger();
  }

  SDValue Cvt = NewLoad;
  if (Ld->getExtensionType() == ISD::SEXTLOAD) {
    Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad,
                      DAG.getValueType(TruncVT));
  } else if (Ld->getExtensionType() == ISD::ZEXTLOAD ||
             Ld->getExtensionType() == ISD::NON_EXTLOAD) {
    Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT);
  } else {
    assert(Ld->getExtensionType() == ISD::EXTLOAD);
  }

  EVT VT = Ld->getValueType(0);
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());

  DCI.AddToWorklist(Cvt.getNode());

  // We may need to handle exotic cases, such as i16->i64 extloads, so insert
  // the appropriate extension from the 32-bit load.
  Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT);
  DCI.AddToWorklist(Cvt.getNode());

  // Handle conversion back to floating point if necessary.
  Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt);

  return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL);
}

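// Custom lowering for loads: expand i1/i8/i16 and i1-vector loads via an
// extending i32 load, and split or widen vector loads according to the
// address space and subtarget limits.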
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
      return SDValue();

    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.
    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    if (!MemVT.isVector()) {
      SDValue Ops[] = {
        DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
        NewLD.getValue(1)
      };

      return DAG.getMergeValues(Ops, DL);
    }

    SmallVector<SDValue, 3> Elts;
    for (unsigned I = 0, N = MemVT.getVectorNumElements(); I != N; ++I) {
      SDValue Elt = DAG.getNode(ISD::SRL, DL, MVT::i32, NewLD,
                                DAG.getConstant(I, DL, MVT::i32));

      Elts.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Elt));
    }

    SDValue Ops[] = {
      DAG.getBuildVector(MemVT, DL, Elts),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned Alignment = Load->getAlignment();
  unsigned AS = Load->getAddressSpace();
  if (Subtarget->hasLDSMisalignedBug() &&
      AS == AMDGPUAS::FLAT_ADDRESS &&
      Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
    return SplitVectorLoad(Op, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS &&
      !Subtarget->hasMultiDwordFlatScratchAddressing())
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = MemVT.getVectorNumElements();

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
      if (MemVT.isPow2VectorType())
        return SDValue();
      return WidenOrSplitVectorLoad(Op, DAG);
    }
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }

  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::GLOBAL_ADDRESS) {
    if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
        Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) &&
        Alignment >= 4 && NumElements < 32) {
      if (MemVT.isPow2VectorType())
        return SDValue();
      return WidenOrSplitVectorLoad(Op, DAG);
    }
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }
  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
      AS == AMDGPUAS::GLOBAL_ADDRESS ||
      AS == AMDGPUAS::FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorLoad(Op, DAG);

    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
      return WidenOrSplitVectorLoad(Op, DAG);

    return SDValue();
  }
  if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    // Depending on the setting of the private_element_size field in the
    // resource descriptor, we can only make private accesses up to a certain
    // size.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4: {
      SDValue Ops[2];
      std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
      return DAG.getMergeValues(Ops, DL);
    }
    case 8:
      if (NumElements > 2)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    case 16:
      // Same as global/flat.
      if (NumElements > 4)
        return SplitVectorLoad(Op, DAG);

      if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
        return WidenOrSplitVectorLoad(Op, DAG);

      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    // Use ds_read_b128 or ds_read_b96 when possible.
    if (Subtarget->hasDS96AndDS128() &&
        ((Subtarget->useDS128() && MemVT.getStoreSize() == 16) ||
         MemVT.getStoreSize() == 12) &&
        allowsMisalignedMemoryAccessesImpl(MemVT.getSizeInBits(), AS,
                                           Load->getAlign()))
      return SDValue();

    if (NumElements > 2)
      return SplitVectorLoad(Op, DAG);

    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offsets is in bounds. Split vectorized
    // loads here to avoid emitting ds_read2_b32. We may re-combine the
    // load later in the SILoadStoreOptimizer.
    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
        NumElements == 2 && MemVT.getStoreSize() == 8 &&
        Load->getAlignment() < 8) {
      return SplitVectorLoad(Op, DAG);
    }
  }

  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                      MemVT, *Load->getMemOperand())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  return SDValue();
}

SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  assert(VT.getSizeInBits() == 64);

  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
  return DAG.getNode(ISD::BITCAST, DL, VT, Res);
}

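// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.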
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();

  bool AllowInaccurateRcp = Flags.hasApproximateFuncs();

  // Without !fpmath accuracy information, we can't do more because we don't
  // know exactly whether rcp is accurate enough to meet !fpmath requirement.
  if (!AllowInaccurateRcp)
    return SDValue();

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if (CLHS->isExactlyValue(1.0)) {
      // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
      // the CI documentation has a worst case of 1ulp.
      // OpenCL requires <= 2.5ulp for 1.0 / x, so it should always be OK to
      // use it as long as we aren't trying to use denormals.
      //
      // v_rcp_f16 and v_rsq_f16 DO support denormals.

      // 1.0 / sqrt(x) -> rsq(x)

      // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
      // error seems really high at 2^29 ULP.
      if (RHS.getOpcode() == ISD::FSQRT)
        return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

      // 1.0 / x -> rcp(x)
      return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    }

    // Same as for 1.0, but expand the sign out of the constant.
    if (CLHS->isExactlyValue(-1.0)) {
      // -1.0 / x -> rcp (fneg x)
      SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
      return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
    }
  }

  // Turn into multiply by the reciprocal.
  // x / y -> x * (1.0 / y)
  SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
  return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
}

SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();

  bool AllowInaccurateDiv = Flags.hasApproximateFuncs() ||
                            DAG.getTarget().Options.UnsafeFPMath;
  if (!AllowInaccurateDiv)
    return SDValue();

  SDValue NegY = DAG.getNode(ISD::FNEG, SL, VT, Y);
  SDValue One = DAG.getConstantFP(1.0, SL, VT);

  // Approximate 1/Y with rcp, refine it with two Newton-Raphson iterations,
  // then apply a final residual correction to X * (1/Y).
  SDValue R = DAG.getNode(AMDGPUISD::RCP, SL, VT, Y);
  SDValue Tmp0 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);

  R = DAG.getNode(ISD::FMA, SL, VT, Tmp0, R, R);
  SDValue Tmp1 = DAG.getNode(ISD::FMA, SL, VT, NegY, R, One);
  R = DAG.getNode(ISD::FMA, SL, VT, Tmp1, R, R);
  SDValue Ret = DAG.getNode(ISD::FMUL, SL, VT, X, R);
  SDValue Tmp2 = DAG.getNode(ISD::FMA, SL, VT, NegY, Ret, X);
  return DAG.getNode(ISD::FMA, SL, VT, Tmp2, R, Ret);
}

static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                          EVT VT, SDValue A, SDValue B, SDValue GlueChain,
                          SDNodeFlags Flags) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B, Flags);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMUL:
    Opcode = AMDGPUISD::FMUL_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList,
                     {GlueChain.getValue(1), A, B, GlueChain.getValue(2)},
                     Flags);
}

static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                           EVT VT, SDValue A, SDValue B, SDValue C,
                           SDValue GlueChain, SDNodeFlags Flags) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, {A, B, C}, Flags);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMA:
    Opcode = AMDGPUISD::FMA_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList,
                     {GlueChain.getValue(1), A, B, C, GlueChain.getValue(2)},
                     Flags);
}

SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0);
  SDValue Src1 = Op.getOperand(1);

  // Compute the quotient in f32: extend both operands, take the f32
  // reciprocal of the denominator and multiply, then round back to f16 and
  // fix up the result with div_fixup.
  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}

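// Faster 2.5 ULP division that does not support denormals.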
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

  // If |RHS| > 2^96, scale the denominator down by 2^-32 before taking the
  // reciprocal, and apply the same scale to the final product so the result
  // is still LHS / RHS.
  const APFloat K0Val(BitsToFloat(0x6f800000)); // 2^96
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000)); // 2^-32
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // Scale the denominator.
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp(scaled denominator)
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}

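// Returns immediate value for setting the F32 denorm mode when using the
// S_DENORM_MODE instruction.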
static SDValue getSPDenormModeValue(int SPDenormMode, SelectionDAG &DAG,
                                    const SDLoc &SL, const GCNSubtarget *ST) {
  assert(ST->hasDenormModeInst() && "Requires S_DENORM_MODE");
  int DPDenormModeDefault = hasFP64FP16Denormals(DAG.getMachineFunction())
                                ? FP_DENORM_FLUSH_NONE
                                : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  int Mode = SPDenormMode | (DPDenormModeDefault << 2);
  return DAG.getTargetConstant(Mode, SL, MVT::i32);
}

SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  // The selection matcher assumes anything with a chain selecting to a
  // mayRaiseFPException machine instruction. Since we're introducing a chain
  // here, we need to explicitly report nofpexcept for the regular fdiv
  // lowering.
  SDNodeFlags Flags = Op->getFlags();
  Flags.setNoFPExcept(true);

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          {RHS, RHS, LHS}, Flags);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        {LHS, RHS, LHS}, Flags);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled, Flags);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled, Flags);

  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32);

  const bool HasFP32Denormals = hasFP32Denormals(DAG.getMachineFunction());

  if (!HasFP32Denormals) {
    // Temporarily enable f32 denormals around the intermediate FMA chain.
    // The mode switch is glued to the arithmetic so nothing can be scheduled
    // in between while the non-default mode is active.
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);

    SDNode *EnableDenorm;
    if (Subtarget->hasDenormModeInst()) {
      const SDValue EnableDenormValue =
          getSPDenormModeValue(FP_DENORM_FLUSH_NONE, DAG, SL, Subtarget);

      EnableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, BindParamVTs,
                                 DAG.getEntryNode(), EnableDenormValue).getNode();
    } else {
      const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                        SL, MVT::i32);
      EnableDenorm =
          DAG.getMachineNode(AMDGPU::S_SETREG_B32, SL, BindParamVTs,
                             {EnableDenormValue, BitField, DAG.getEntryNode()});
    }

    SDValue Ops[3] = {
      NegDivScale0,
      SDValue(EnableDenorm, 0),
      SDValue(EnableDenorm, 1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }

  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0, Flags);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0, Flags);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1, Flags);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul, Flags);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32,
                             Fma2, Fma1, Mul, Fma2, Flags);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3, Flags);

  if (!HasFP32Denormals) {
    // Restore the default denorm mode once the FMA chain is complete.
    SDNode *DisableDenorm;
    if (Subtarget->hasDenormModeInst()) {
      const SDValue DisableDenormValue =
          getSPDenormModeValue(FP_DENORM_FLUSH_IN_FLUSH_OUT, DAG, SL, Subtarget);

      DisableDenorm = DAG.getNode(AMDGPUISD::DENORM_MODE, SL, MVT::Other,
                                  Fma4.getValue(1), DisableDenormValue,
                                  Fma4.getValue(2)).getNode();
    } else {
      const SDValue DisableDenormValue =
          DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);

      DisableDenorm = DAG.getMachineNode(
          AMDGPU::S_SETREG_B32, SL, MVT::Other,
          {DisableDenormValue, BitField, Fma4.getValue(1), Fma4.getValue(2)});
    }

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      SDValue(DisableDenorm, 0), DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             {Fma4, Fma1, Fma3, Scale}, Flags);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS, Flags);
}

SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV64(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);

  SDValue Scale;

  if (!Subtarget->hasUsableDivScaleConditionOutput()) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out if the scale to use for div_fmas.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}

SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}

SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    return DAG.getTruncStore(Store->getChain(), DL,
                             DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
                             Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  unsigned AS = Store->getAddressSpace();
  if (Subtarget->hasLDSMisalignedBug() &&
      AS == AMDGPUAS::FLAT_ADDRESS &&
      Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {
    return SplitVectorStore(Op, DAG);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUAS::FLAT_ADDRESS &&
      !Subtarget->hasMultiDwordFlatScratchAddressing())
    AS = MFI->hasFlatScratchInit() ?
         AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUAS::GLOBAL_ADDRESS ||
      AS == AMDGPUAS::FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);

    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
      return SplitVectorStore(Op, DAG);

    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                        VT, *Store->getMemOperand()))
      return expandUnalignedStore(Store, DAG);

    return SDValue();
  } else if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4 ||
          (NumElements == 3 && !Subtarget->enableFlatScratch()))
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    // Use ds_write_b128 or ds_write_b96 when possible.
    if (Subtarget->hasDS96AndDS128() &&
        ((Subtarget->useDS128() && VT.getStoreSize() == 16) ||
         (VT.getStoreSize() == 12)) &&
        allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AS,
                                           Store->getAlign()))
      return SDValue();

    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
    // address is negative, then the instruction is incorrectly treated as
    // out-of-bounds even if base + offsets is in bounds. Split vectorized
    // stores here to avoid emitting ds_write2_b32. We may re-combine the
    // store later in the SILoadStoreOptimizer.
    if (!Subtarget->hasUsableDSOffset() &&
        NumElements == 2 && VT.getStoreSize() == 8 &&
        Store->getAlignment() < 8) {
      return SplitVectorStore(Op, DAG);
    }

    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                        VT, *Store->getMemOperand())) {
      if (VT.isVector())
        return SplitVectorStore(Op, DAG);
      return expandUnalignedStore(Store, DAG);
    }

    return SDValue();
  } else {
    llvm_unreachable("unhandled address space");
  }
}

SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  SDValue TrigVal;

  // Propagate fast-math flags so that the multiply we introduce can be folded
  // if Arg is already the result of a multiply by constant.
  auto Flags = Op->getFlags();

  SDValue OneOver2Pi = DAG.getConstantFP(0.5 * numbers::inv_pi, DL, VT);

  if (Subtarget->hasTrigReducedRange()) {
    SDValue MulVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
    TrigVal = DAG.getNode(AMDGPUISD::FRACT, DL, VT, MulVal, Flags);
  } else {
    TrigVal = DAG.getNode(ISD::FMUL, DL, VT, Arg, OneOver2Pi, Flags);
  }

  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, TrigVal, Flags);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, TrigVal, Flags);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}

SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space.
  if (!AMDGPU::isFlatGlobalAddrSpace(AS))
    return Op;

  // Non-local address space requires custom lowering for atomic compare
  // and swap; cmp + swap values are packed in one register.
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);
  SDValue New = Op.getOperand(3);
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

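// Fold an integer-to-float conversion whose source is known to fit in a byte
// into the CVT_F32_UBYTE0 node.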
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32 && ScalarVT != MVT::f16)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, MVT::f32, Src);
      DCI.AddToWorklist(Cvt.getNode());

      // For the f16 case, fold to a cast to f32 and then cast back to f16.
      if (ScalarVT != MVT::f32) {
        Cvt = DAG.getNode(ISD::FP_ROUND, DL, VT, Cvt,
                          DAG.getTargetConstant(0, DL, MVT::i32));
      }
      return Cvt;
    }
  }

  return SDValue();
}


// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
//
// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset
// of a pointer with multiple uses is representable as an offset of the
// addressing mode, then we can replace the pointer operand with the add of
// the new constant offset. This eliminates one of the uses, and may allow
// the remaining use to also be simplified.
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               EVT MemVT,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We only do this to handle cases where it's profitable when there are
  // multiple uses of the add, so defer to the standard combine.
  if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) ||
      N0->hasOneUse())
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the
  // addressing mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext());

  AddrMode AM;
  AM.HasBaseReg = true;
  AM.BaseOffs = Offset.getSExtValue();
  if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, VT);

  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
                          (N0.getOpcode() == ISD::OR ||
                           N0->getFlags().hasNoUnsignedWrap()));

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags);
}


// Return the operand index that holds the memory operation's base pointer.
static unsigned getBasePtrIndex(const MemSDNode *N) {
  switch (N->getOpcode()) {
  case ISD::STORE:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    return 2;
  default:
    return 1;
  }
}


SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  unsigned PtrIdx = getBasePtrIndex(N);
  SDValue Ptr = N->getOperand(PtrIdx);

  // TODO: We could also do this for multiplies.
  if (Ptr.getOpcode() == ISD::SHL) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
                                          N->getMemoryVT(), DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[PtrIdx] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up a 64-bit bit operation with a constant into two 32-bit halves.
// This will typically happen anyway for a VALU 64-bit and. It exposes other
// 32-bit integer combine opportunities, and on constants where a half is a
// no-op it avoids materializing the full 64-bit immediate.
SDValue SITargetLowering::splitBinaryBitConstantOp(
    DAGCombinerInfo &DCI,
    const SDLoc &SL,
    unsigned Opc, SDValue LHS,
    const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up
    // later anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

static bool isBoolSGPR(SDValue V) {
  if (V.getValueType() != MVT::i1)
    return false;
  switch (V.getOpcode()) {
  default:
    break;
  case ISD::SETCC:
  case AMDGPUISD::FP_CLASS:
    return true;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return isBoolSGPR(V.getOperand(0)) && isBoolSGPR(V.getOperand(1));
  }
  return false;
}

// If a constant has all zeroes or all ones within each byte return it.
// Otherwise return 0.
static uint32_t getConstantPermuteMask(uint32_t C) {
  // 0xff for any zero byte in the mask.
  uint32_t ZeroByteMask = 0;
  if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
  if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
  if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
  if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
  uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte.
  if ((NonZeroByteMask & C) != NonZeroByteMask)
    return 0; // Partial bytes selected.
  return C;
}

// Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns a select mask as used by the
// v_perm_b32 instruction (selector 0-3 picks a byte of the source, 0x0c
// produces zero), or ~0 if the node is not a byte select.
static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
  assert(V.getValueSizeInBits() == 32);

  if (V.getNumOperands() != 2)
    return ~0;

  ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!N1)
    return ~0;

  uint32_t C = N1->getZExtValue();

  switch (V.getOpcode()) {
  default:
    break;
  case ISD::AND:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
    }
    break;

  case ISD::OR:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ~ConstMask) | ConstMask;
    }
    break;

  case ISD::SHL:
    if (C % 8)
      return ~0;

    return uint32_t((0x030201000c0c0c0cull << C) >> 32);

  case ISD::SRL:
    if (C % 8)
      return ~0;

    return uint32_t(0x0c0c0c0c03020100ull >> C);
  }

  return ~0;
}

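// Combine (and x, y): split 64-bit constant masks, form BFE, fold fcmp/fabs
// pairs into fp_class, and merge byte selects into v_perm_b32 where
// profitable.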
9056 | SDValue SITargetLowering::performAndCombine(SDNode *N, |
9057 | DAGCombinerInfo &DCI) const { |
9058 | if (DCI.isBeforeLegalize()) |
9059 | return SDValue(); |
9060 | |
9061 | SelectionDAG &DAG = DCI.DAG; |
9062 | EVT VT = N->getValueType(0); |
9063 | SDValue LHS = N->getOperand(0); |
9064 | SDValue RHS = N->getOperand(1); |
9065 | |
9066 | |
9067 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
9068 | if (VT == MVT::i64 && CRHS) { |
9069 | if (SDValue Split |
9070 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) |
9071 | return Split; |
9072 | } |
9073 | |
9074 | if (CRHS && VT == MVT::i32) { |
9075 | |
9076 | |
9077 | |
9078 | |
9079 | uint64_t Mask = CRHS->getZExtValue(); |
9080 | unsigned Bits = countPopulation(Mask); |
9081 | if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL && |
9082 | (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) { |
9083 | if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) { |
9084 | unsigned Shift = CShift->getZExtValue(); |
9085 | unsigned NB = CRHS->getAPIntValue().countTrailingZeros(); |
9086 | unsigned Offset = NB + Shift; |
9087 | if ((Offset & (Bits - 1)) == 0) { |
9088 | SDLoc SL(N); |
9089 | SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, |
9090 | LHS->getOperand(0), |
9091 | DAG.getConstant(Offset, SL, MVT::i32), |
9092 | DAG.getConstant(Bits, SL, MVT::i32)); |
9093 | EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits); |
9094 | SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE, |
9095 | DAG.getValueType(NarrowVT)); |
9096 | SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext, |
9097 | DAG.getConstant(NB, SDLoc(CRHS), MVT::i32)); |
9098 | return Shl; |
9099 | } |
9100 | } |
9101 | } |
9102 | |
9103 | |
9104 | if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM && |
9105 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
9106 | uint32_t Sel = getConstantPermuteMask(Mask); |
9107 | if (!Sel) |
9108 | return SDValue(); |
9109 | |
9110 | |
9111 | Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c); |
9112 | SDLoc DL(N); |
9113 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
9114 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
9115 | } |
9116 | } |
9117 | |
9118 | |
9119 | |
9120 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) { |
9121 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
9122 | ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
9123 | |
9124 | SDValue X = LHS.getOperand(0); |
9125 | SDValue Y = RHS.getOperand(0); |
9126 | if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X) |
9127 | return SDValue(); |
9128 | |
9129 | if (LCC == ISD::SETO) { |
9130 | if (X != LHS.getOperand(1)) |
9131 | return SDValue(); |
9132 | |
9133 | if (RCC == ISD::SETUNE) { |
9134 | const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1)); |
9135 | if (!C1 || !C1->isInfinity() || C1->isNegative()) |
9136 | return SDValue(); |
9137 | |
9138 | const uint32_t Mask = SIInstrFlags::N_NORMAL | |
9139 | SIInstrFlags::N_SUBNORMAL | |
9140 | SIInstrFlags::N_ZERO | |
9141 | SIInstrFlags::P_ZERO | |
9142 | SIInstrFlags::P_SUBNORMAL | |
9143 | SIInstrFlags::P_NORMAL; |
9144 | |
9145 | static_assert(((~(SIInstrFlags::S_NAN | |
9146 | SIInstrFlags::Q_NAN | |
9147 | SIInstrFlags::N_INFINITY | |
9148 | SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask, |
9149 | "mask not equal"); |
9150 | |
9151 | SDLoc DL(N); |
9152 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
9153 | X, DAG.getConstant(Mask, DL, MVT::i32)); |
9154 | } |
9155 | } |
9156 | } |
9157 | |
9158 | if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) |
9159 | std::swap(LHS, RHS); |
9160 | |
9161 | if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && |
9162 | RHS.hasOneUse()) { |
9163 | ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
9164 | // and (fcmp seto x, x), (fp_class x, mask) -> fp_class x, mask & ~(S_NAN | Q_NAN) |
9165 | // and (fcmp setuo x, x), (fp_class x, mask) -> fp_class x, mask & (S_NAN | Q_NAN) |
9166 | const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
9167 | if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && |
9168 | (RHS.getOperand(0) == LHS.getOperand(0) && |
9169 | LHS.getOperand(0) == LHS.getOperand(1))) { |
9170 | const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; |
9171 | unsigned NewMask = LCC == ISD::SETO ? |
9172 | Mask->getZExtValue() & ~OrdMask : |
9173 | Mask->getZExtValue() & OrdMask; |
9174 | |
9175 | SDLoc DL(N); |
9176 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), |
9177 | DAG.getConstant(NewMask, DL, MVT::i32)); |
9178 | } |
9179 | } |
9180 | |
9181 | if (VT == MVT::i32 && |
9182 | (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { |
9183 | // and x, (sext cc from i1) => select cc, x, 0 |
9184 | if (RHS.getOpcode() != ISD::SIGN_EXTEND) |
9185 | std::swap(LHS, RHS); |
9186 | if (isBoolSGPR(RHS.getOperand(0))) |
9187 | return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0), |
9188 | LHS, DAG.getConstant(0, SDLoc(N), MVT::i32)); |
9189 | } |
9190 | |
9191 | // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
9192 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
9193 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
9194 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { |
9195 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
9196 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
9197 | if (LHSMask != ~0u && RHSMask != ~0u) { |
9198 | // Canonicalize the expression in an attempt to have fewer unique masks |
9199 | // and therefore fewer registers used for duplicated constants. |
9200 | if (LHSMask > RHSMask) { |
9201 | std::swap(LHSMask, RHSMask); |
9202 | std::swap(LHS, RHS); |
9203 | } |
9204 | |
9205 | // Select 0x0c for each lane actually read from the source operand; |
9206 | // bytes holding the constant-zero selector 0x0c end up as 0. |
9207 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
9208 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
9209 | |
9210 | // The two sources must not supply bits to the same destination byte. |
9211 | if (!(LHSUsedLanes & RHSUsedLanes) && |
9212 | // If we would select the high word from one source and the low word |
9213 | // from the other, keep the AND so SDWA can match it instead. |
9214 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { |
9215 | |
9216 | |
9217 | |
9218 | |
9219 | |
9220 | uint32_t Mask = LHSMask & RHSMask; |
9221 | for (unsigned I = 0; I < 32; I += 8) { |
9222 | uint32_t ByteSel = 0xff << I; |
9223 | if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c) |
9224 | Mask &= (0x0c << I) & 0xffffffff; |
9225 | } |
9226 | |
9227 | |
9228 | |
9229 | uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404); |
9230 | SDLoc DL(N); |
9231 | |
9232 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
9233 | LHS.getOperand(0), RHS.getOperand(0), |
9234 | DAG.getConstant(Sel, DL, MVT::i32)); |
9235 | } |
9236 | } |
9237 | } |
9238 | |
9239 | return SDValue(); |
9240 | } |
9241 | |
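      | // performOrCombine handles, in order: merging two fp_class tests of the
      | // same value into one mask, folding a constant OR into an existing PERM
      | // selector, forming v_perm_b32 from two byte-permuting sources, folding
      | // (or i64:x, zext i32:y) into a build_vector of the halves, and finally
      | // splitting i64 ORs with a constant operand.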
9242 | SDValue SITargetLowering::performOrCombine(SDNode *N, |
9243 | DAGCombinerInfo &DCI) const { |
9244 | SelectionDAG &DAG = DCI.DAG; |
9245 | SDValue LHS = N->getOperand(0); |
9246 | SDValue RHS = N->getOperand(1); |
9247 | |
9248 | EVT VT = N->getValueType(0); |
9249 | if (VT == MVT::i1) { |
9250 | // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2) |
9251 | if (LHS.getOpcode() == AMDGPUISD::FP_CLASS && |
9252 | RHS.getOpcode() == AMDGPUISD::FP_CLASS) { |
9253 | SDValue Src = LHS.getOperand(0); |
9254 | if (Src != RHS.getOperand(0)) |
9255 | return SDValue(); |
9256 | |
9257 | const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
9258 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
9259 | if (!CLHS || !CRHS) |
9260 | return SDValue(); |
9261 | |
9262 | // Only the low 10 bits are valid fp_class test bits. |
9263 | static const uint32_t MaxMask = 0x3ff; |
9264 | |
9265 | uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask; |
9266 | SDLoc DL(N); |
9267 | return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, |
9268 | Src, DAG.getConstant(NewMask, DL, MVT::i32)); |
9269 | } |
9270 | |
9271 | return SDValue(); |
9272 | } |
9273 | |
9274 | // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2) |
9275 | if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() && |
9276 | LHS.getOpcode() == AMDGPUISD::PERM && |
9277 | isa<ConstantSDNode>(LHS.getOperand(2))) { |
9278 | uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1)); |
9279 | if (!Sel) |
9280 | return SDValue(); |
9281 | |
9282 | Sel |= LHS.getConstantOperandVal(2); |
9283 | SDLoc DL(N); |
9284 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0), |
9285 | LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32)); |
9286 | } |
9287 | |
9288 | // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2) |
9289 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
9290 | if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() && |
9291 | N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32_e64) != -1) { |
9292 | uint32_t LHSMask = getPermuteMask(DAG, LHS); |
9293 | uint32_t RHSMask = getPermuteMask(DAG, RHS); |
9294 | if (LHSMask != ~0u && RHSMask != ~0u) { |
9295 | // Canonicalize the expression in an attempt to have fewer unique masks |
9296 | // and therefore fewer registers used for duplicated constants. |
9297 | if (LHSMask > RHSMask) { |
9298 | std::swap(LHSMask, RHSMask); |
9299 | std::swap(LHS, RHS); |
9300 | } |
9301 | |
9302 | // Select 0x0c for each lane actually read from the source operand; |
9303 | // bytes holding the constant-zero selector 0x0c end up as 0. |
9304 | uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
9305 | uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c; |
9306 | |
9307 | // The two sources must not supply bits to the same destination byte. |
9308 | if (!(LHSUsedLanes & RHSUsedLanes) && |
9309 | // If we would select the high word from one source and the low word |
9310 | // from the other, keep the OR so SDWA can match it instead. |
9311 | !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) { |
9312 | // Kill zero bytes selected by the other mask; the zero selector is 0x0c. |
9313 | LHSMask &= ~RHSUsedLanes; |
9314 | RHSMask &= ~LHSUsedLanes; |
9315 | // Selector values 4-7 address the first perm operand, so tag LHS lanes with 0x04. |
9316 | LHSMask |= LHSUsedLanes & 0x04040404; |
9317 | |
9318 | uint32_t Sel = LHSMask | RHSMask; |
9319 | SDLoc DL(N); |
9320 | |
9321 | return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, |
9322 | LHS.getOperand(0), RHS.getOperand(0), |
9323 | DAG.getConstant(Sel, DL, MVT::i32)); |
9324 | } |
9325 | } |
9326 | } |
9327 | |
9328 | if (VT != MVT::i64 || DCI.isBeforeLegalizeOps()) |
9329 | return SDValue(); |
9330 | |
9331 | // TODO: This could be a generic combine with a predicate for extracting |
9332 | // the high half of an integer being free. |
9333 | |
9334 | // (or i64:x, (zero_extend i32:y)) -> |
9335 | //   i64 (bitcast (build_vector (or i32:y, lo_32(x)), hi_32(x))) |
9336 | if (LHS.getOpcode() == ISD::ZERO_EXTEND && |
9337 | RHS.getOpcode() != ISD::ZERO_EXTEND) |
9338 | std::swap(LHS, RHS); |
9339 | |
9340 | if (RHS.getOpcode() == ISD::ZERO_EXTEND) { |
9341 | SDValue ExtSrc = RHS.getOperand(0); |
9342 | EVT SrcVT = ExtSrc.getValueType(); |
9343 | if (SrcVT == MVT::i32) { |
9344 | SDLoc SL(N); |
9345 | SDValue LowLHS, HiBits; |
9346 | std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG); |
9347 | SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc); |
9348 | |
9349 | DCI.AddToWorklist(LowOr.getNode()); |
9350 | DCI.AddToWorklist(HiBits.getNode()); |
9351 | |
9352 | SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, |
9353 | LowOr, HiBits); |
9354 | return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); |
9355 | } |
9356 | } |
9357 | |
9358 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
9359 | if (CRHS) { |
9360 | if (SDValue Split |
9361 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) |
9362 | return Split; |
9363 | } |
9364 | |
9365 | return SDValue(); |
9366 | } |
9367 | |
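      | // performXorCombine only splits an i64 xor with a constant into two
      | // 32-bit halves via splitBinaryBitConstantOp.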
9368 | SDValue SITargetLowering::performXorCombine(SDNode *N, |
9369 | DAGCombinerInfo &DCI) const { |
9370 | EVT VT = N->getValueType(0); |
9371 | if (VT != MVT::i64) |
9372 | return SDValue(); |
9373 | |
9374 | SDValue LHS = N->getOperand(0); |
9375 | SDValue RHS = N->getOperand(1); |
9376 | |
9377 | const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS); |
9378 | if (CRHS) { |
9379 | if (SDValue Split |
9380 | = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS)) |
9381 | return Split; |
9382 | } |
9383 | |
9384 | return SDValue(); |
9385 | } |
9386 | |
9387 | SDValue SITargetLowering::performZeroExtendCombine(SDNode *N, |
9388 | DAGCombinerInfo &DCI) const { |
9389 | if (!Subtarget->has16BitInsts() || |
9390 | DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
9391 | return SDValue(); |
9392 | |
9393 | EVT VT = N->getValueType(0); |
9394 | if (VT != MVT::i32) |
9395 | return SDValue(); |
9396 | |
9397 | SDValue Src = N->getOperand(0); |
9398 | if (Src.getValueType() != MVT::i16) |
9399 | return SDValue(); |
9400 | |
9401 | return SDValue(); |
9402 | } |
9403 | |
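      | // Fold sign_extend_inreg of an unsigned buffer load (ubyte/ushort) into
      | // the corresponding signed buffer load, preserving the chain result.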
9404 | SDValue SITargetLowering::performSignExtendInRegCombine(SDNode *N, |
9405 | DAGCombinerInfo &DCI) |
9406 | const { |
9407 | SDValue Src = N->getOperand(0); |
9408 | auto *VTSign = cast<VTSDNode>(N->getOperand(1)); |
9409 | |
9410 | if (((Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE && |
9411 | VTSign->getVT() == MVT::i8) || |
9412 | (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_USHORT && |
9413 | VTSign->getVT() == MVT::i16)) && |
9414 | Src.hasOneUse()) { |
9415 | auto *M = cast<MemSDNode>(Src); |
9416 | SDValue Ops[] = { |
9417 | Src.getOperand(0), |
9418 | Src.getOperand(1), |
9419 | Src.getOperand(2), |
9420 | Src.getOperand(3), |
9421 | Src.getOperand(4), |
9422 | Src.getOperand(5), |
9423 | Src.getOperand(6), |
9424 | Src.getOperand(7) |
9425 | }; |
9426 | |
9427 | SDVTList ResList = DCI.DAG.getVTList(MVT::i32, |
9428 | Src.getOperand(0).getValueType()); |
9429 | unsigned Opc = (Src.getOpcode() == AMDGPUISD::BUFFER_LOAD_UBYTE) ? |
9430 | AMDGPUISD::BUFFER_LOAD_BYTE : AMDGPUISD::BUFFER_LOAD_SHORT; |
9431 | SDValue BufferLoadSignExt = DCI.DAG.getMemIntrinsicNode(Opc, SDLoc(N), |
9432 | ResList, |
9433 | Ops, M->getMemoryVT(), |
9434 | M->getMemOperand()); |
9435 | return DCI.DAG.getMergeValues({BufferLoadSignExt, |
9436 | BufferLoadSignExt.getValue(1)}, SDLoc(N)); |
9437 | } |
9438 | return SDValue(); |
9439 | } |
9440 | |
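      | // fp_class with an empty (zero) test mask can never be true, and
      | // fp_class of undef is undef.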
9441 | SDValue SITargetLowering::performClassCombine(SDNode *N, |
9442 | DAGCombinerInfo &DCI) const { |
9443 | SelectionDAG &DAG = DCI.DAG; |
9444 | SDValue Mask = N->getOperand(1); |
9445 | |
9446 | |
9447 | if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) { |
9448 | if (CMask->isNullValue()) |
9449 | return DAG.getConstant(0, SDLoc(N), MVT::i1); |
9450 | } |
9451 | |
9452 | if (N->getOperand(0).isUndef()) |
9453 | return DAG.getUNDEF(MVT::i1); |
9454 | |
9455 | return SDValue(); |
9456 | } |
9457 | |
9458 | SDValue SITargetLowering::performRcpCombine(SDNode *N, |
9459 | DAGCombinerInfo &DCI) const { |
9460 | EVT VT = N->getValueType(0); |
9461 | SDValue N0 = N->getOperand(0); |
9462 | |
9463 | if (N0.isUndef()) |
9464 | return N0; |
9465 | |
9466 | if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP || |
9467 | N0.getOpcode() == ISD::SINT_TO_FP)) { |
9468 | return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0, |
9469 | N->getFlags()); |
9470 | } |
9471 | |
9472 | if ((VT == MVT::f32 || VT == MVT::f16) && N0.getOpcode() == ISD::FSQRT) { |
9473 | return DCI.DAG.getNode(AMDGPUISD::RSQ, SDLoc(N), VT, |
9474 | N0.getOperand(0), N->getFlags()); |
9475 | } |
9476 | |
9477 | return AMDGPUTargetLowering::performRcpCombine(N, DCI); |
9478 | } |
9479 | |
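      | // Conservatively returns true only when Op is known to already produce
      | // a canonical value: no unquieted signaling NaNs, and denormals that
      | // are either flushed or supported for the type.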
9480 | bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op, |
9481 | unsigned MaxDepth) const { |
9482 | unsigned Opcode = Op.getOpcode(); |
9483 | if (Opcode == ISD::FCANONICALIZE) |
9484 | return true; |
9485 | |
9486 | if (auto *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
9487 | auto F = CFP->getValueAPF(); |
9488 | if (F.isNaN() && F.isSignaling()) |
9489 | return false; |
9490 | return !F.isDenormal() || denormalsEnabledForType(DAG, Op.getValueType()); |
9491 | } |
9492 | |
9493 | |
9494 | |
9495 | if (MaxDepth == 0) |
9496 | return false; |
9497 | |
9498 | switch (Opcode) { |
9499 | |
9500 | case ISD::FADD: |
9501 | case ISD::FSUB: |
9502 | case ISD::FMUL: |
9503 | case ISD::FCEIL: |
9504 | case ISD::FFLOOR: |
9505 | case ISD::FMA: |
9506 | case ISD::FMAD: |
9507 | case ISD::FSQRT: |
9508 | case ISD::FDIV: |
9509 | case ISD::FREM: |
9510 | case ISD::FP_ROUND: |
9511 | case ISD::FP_EXTEND: |
9512 | case AMDGPUISD::FMUL_LEGACY: |
9513 | case AMDGPUISD::FMAD_FTZ: |
9514 | case AMDGPUISD::RCP: |
9515 | case AMDGPUISD::RSQ: |
9516 | case AMDGPUISD::RSQ_CLAMP: |
9517 | case AMDGPUISD::RCP_LEGACY: |
9518 | case AMDGPUISD::RCP_IFLAG: |
9519 | case AMDGPUISD::DIV_SCALE: |
9520 | case AMDGPUISD::DIV_FMAS: |
9521 | case AMDGPUISD::DIV_FIXUP: |
9522 | case AMDGPUISD::FRACT: |
9523 | case AMDGPUISD::LDEXP: |
9524 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
9525 | case AMDGPUISD::CVT_F32_UBYTE0: |
9526 | case AMDGPUISD::CVT_F32_UBYTE1: |
9527 | case AMDGPUISD::CVT_F32_UBYTE2: |
9528 | case AMDGPUISD::CVT_F32_UBYTE3: |
9529 | return true; |
9530 | |
9531 | // These are lowered or combined as bit operations, so canonicality |
9532 | // depends entirely on the source operand. |
9533 | case ISD::FNEG: |
9534 | case ISD::FABS: |
9535 | case ISD::FCOPYSIGN: |
9536 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
9537 | |
9538 | case ISD::FSIN: |
9539 | case ISD::FCOS: |
9540 | case ISD::FSINCOS: |
9541 | return Op.getValueType().getScalarType() != MVT::f16; |
9542 | |
9543 | case ISD::FMINNUM: |
9544 | case ISD::FMAXNUM: |
9545 | case ISD::FMINNUM_IEEE: |
9546 | case ISD::FMAXNUM_IEEE: |
9547 | case AMDGPUISD::CLAMP: |
9548 | case AMDGPUISD::FMED3: |
9549 | case AMDGPUISD::FMAX3: |
9550 | case AMDGPUISD::FMIN3: { |
9551 | |
9552 | |
9553 | |
9554 | |
9555 | |
9556 | if (Subtarget->supportsMinMaxDenormModes() || |
9557 | denormalsEnabledForType(DAG, Op.getValueType())) |
9558 | return true; |
9559 | |
9560 | |
9561 | |
9562 | |
9563 | |
9564 | |
9565 | for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I) { |
9566 | if (!isCanonicalized(DAG, Op.getOperand(I), MaxDepth - 1)) |
9567 | return false; |
9568 | } |
9569 | |
9570 | return true; |
9571 | } |
9572 | case ISD::SELECT: { |
9573 | return isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1) && |
9574 | isCanonicalized(DAG, Op.getOperand(2), MaxDepth - 1); |
9575 | } |
9576 | case ISD::BUILD_VECTOR: { |
9577 | for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { |
9578 | SDValue SrcOp = Op.getOperand(i); |
9579 | if (!isCanonicalized(DAG, SrcOp, MaxDepth - 1)) |
9580 | return false; |
9581 | } |
9582 | |
9583 | return true; |
9584 | } |
9585 | case ISD::EXTRACT_VECTOR_ELT: |
9586 | case ISD::EXTRACT_SUBVECTOR: { |
9587 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
9588 | } |
9589 | case ISD::INSERT_VECTOR_ELT: { |
9590 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1) && |
9591 | isCanonicalized(DAG, Op.getOperand(1), MaxDepth - 1); |
9592 | } |
9593 | case ISD::UNDEF: |
9594 | |
9595 | return false; |
9596 | |
9597 | case ISD::BITCAST: |
9598 | return isCanonicalized(DAG, Op.getOperand(0), MaxDepth - 1); |
9599 | case ISD::TRUNCATE: { |
9600 | |
9601 | if (Op.getValueType() == MVT::i16) { |
9602 | SDValue TruncSrc = Op.getOperand(0); |
9603 | if (TruncSrc.getValueType() == MVT::i32 && |
9604 | TruncSrc.getOpcode() == ISD::BITCAST && |
9605 | TruncSrc.getOperand(0).getValueType() == MVT::v2f16) { |
9606 | return isCanonicalized(DAG, TruncSrc.getOperand(0), MaxDepth - 1); |
9607 | } |
9608 | } |
9609 | return false; |
9610 | } |
9611 | case ISD::INTRINSIC_WO_CHAIN: { |
9612 | unsigned IntrinsicID |
9613 | = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); |
9614 | |
9615 | switch (IntrinsicID) { |
9616 | case Intrinsic::amdgcn_cvt_pkrtz: |
9617 | case Intrinsic::amdgcn_cubeid: |
9618 | case Intrinsic::amdgcn_frexp_mant: |
9619 | case Intrinsic::amdgcn_fdot2: |
9620 | case Intrinsic::amdgcn_rcp: |
9621 | case Intrinsic::amdgcn_rsq: |
9622 | case Intrinsic::amdgcn_rsq_clamp: |
9623 | case Intrinsic::amdgcn_rcp_legacy: |
9624 | case Intrinsic::amdgcn_rsq_legacy: |
9625 | case Intrinsic::amdgcn_trig_preop: |
9626 | return true; |
9627 | default: |
9628 | break; |
9629 | } |
9630 | |
9631 | LLVM_FALLTHROUGH; |
9632 | } |
9633 | default: |
9634 | return denormalsEnabledForType(DAG, Op.getValueType()) && |
9635 | DAG.isKnownNeverSNaN(Op); |
9636 | } |
9637 | |
9638 | llvm_unreachable("invalid operation"); |
9639 | } |
9640 | |
9641 | bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF, |
9642 | unsigned MaxDepth) const { |
9643 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
9644 | MachineInstr *MI = MRI.getVRegDef(Reg); |
9645 | unsigned Opcode = MI->getOpcode(); |
9646 | |
9647 | if (Opcode == AMDGPU::G_FCANONICALIZE) |
9648 | return true; |
9649 | |
9650 | if (Opcode == AMDGPU::G_FCONSTANT) { |
9651 | auto F = MI->getOperand(1).getFPImm()->getValueAPF(); |
9652 | if (F.isNaN() && F.isSignaling()) |
9653 | return false; |
9654 | return !F.isDenormal() || denormalsEnabledForType(MRI.getType(Reg), MF); |
9655 | } |
9656 | |
9657 | if (MaxDepth == 0) |
9658 | return false; |
9659 | |
9660 | switch (Opcode) { |
9661 | case AMDGPU::G_FMINNUM_IEEE: |
9662 | case AMDGPU::G_FMAXNUM_IEEE: { |
9663 | if (Subtarget->supportsMinMaxDenormModes() || |
9664 | denormalsEnabledForType(MRI.getType(Reg), MF)) |
9665 | return true; |
9666 | for (unsigned I = 1, E = MI->getNumOperands(); I != E; ++I) { |
9667 | if (!isCanonicalized(MI->getOperand(I).getReg(), MF, MaxDepth - 1)) |
9668 | return false; |
9669 | } |
9670 | return true; |
9671 | } |
9672 | default: |
9673 | return denormalsEnabledForType(MRI.getType(Reg), MF) && |
9674 | isKnownNeverSNaN(Reg, MRI); |
9675 | } |
9676 | |
9677 | llvm_unreachable("invalid operation"); |
9678 | } |
9679 | |
9680 | |
9681 | SDValue SITargetLowering::getCanonicalConstantFP( |
9682 | SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { |
9683 | // Flush denormals to +0.0 when denormals are not enabled for this type. |
9684 | if (C.isDenormal() && !denormalsEnabledForType(DAG, VT)) |
9685 | return DAG.getConstantFP(0.0, SL, VT); |
9686 | |
9687 | if (C.isNaN()) { |
9688 | APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics()); |
9689 | if (C.isSignaling()) { |
9690 | // Quiet the signaling NaN by returning the canonical quiet NaN. |
9691 | |
9692 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
9693 | } |
9694 | |
9695 | |
9696 | |
9697 | // Rewrite quiet NaNs that do not already have the canonical |
9698 | // quiet-NaN bit pattern. |
9699 | if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt()) |
9700 | return DAG.getConstantFP(CanonicalQNaN, SL, VT); |
9701 | } |
9702 | |
9703 | |
9704 | return DAG.getConstantFP(C, SL, VT); |
9705 | } |
9706 | |
9707 | static bool vectorEltWillFoldAway(SDValue Op) { |
9708 | return Op.isUndef() || isa<ConstantFPSDNode>(Op); |
9709 | } |
9710 | |
9711 | SDValue SITargetLowering::performFCanonicalizeCombine( |
9712 | SDNode *N, |
9713 | DAGCombinerInfo &DCI) const { |
9714 | SelectionDAG &DAG = DCI.DAG; |
9715 | SDValue N0 = N->getOperand(0); |
9716 | EVT VT = N->getValueType(0); |
9717 | |
9718 | |
9719 | if (N0.isUndef()) { |
9720 | APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT)); |
9721 | return DAG.getConstantFP(QNaN, SDLoc(N), VT); |
9722 | } |
9723 | |
9724 | if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0)) { |
9725 | EVT VT = N->getValueType(0); |
9726 | return getCanonicalConstantFP(DAG, SDLoc(N), VT, CFP->getValueAPF()); |
9727 | } |
9728 | |
9729 | |
9730 | |
9731 | |
9732 | |
9733 | |
9734 | |
9735 | |
9736 | if (N0.getOpcode() == ISD::BUILD_VECTOR && VT == MVT::v2f16 && |
      |   8 | Assuming the condition is true
9737 | isTypeLegal(MVT::v2f16)) { |
9738 | SDLoc SL(N); |
9739 | SDValue NewElts[2]; |
9740 | SDValue Lo = N0.getOperand(0); |
9741 | SDValue Hi = N0.getOperand(1); |
9742 | EVT EltVT = Lo.getValueType(); |
9743 | |
9744 | if (vectorEltWillFoldAway(Lo) || vectorEltWillFoldAway(Hi)) { |
9745 | for (unsigned I = 0; I != 2; ++I) { |
      |  10 | Loop condition is true.  Entering loop body
      |  18 | Loop condition is true.  Entering loop body
9746 | SDValue Op = N0.getOperand(I); |
      |  19 | Value assigned to 'Op.Node'
9747 | if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) { |
      |  11 | Calling 'dyn_cast<llvm::ConstantFPSDNode, llvm::SDValue>'
      |  15 | Returning from 'dyn_cast<llvm::ConstantFPSDNode, llvm::SDValue>'
      |  20 | Calling 'dyn_cast<llvm::ConstantFPSDNode, llvm::SDValue>'
      |  33 | Returning from 'dyn_cast<llvm::ConstantFPSDNode, llvm::SDValue>'
9748 | NewElts[I] = getCanonicalConstantFP(DAG, SL, EltVT, |
9749 | CFP->getValueAPF()); |
9750 | } else if (Op.isUndef()) { |
      |  36 | Calling 'SDValue::isUndef'
9751 | |
9752 | NewElts[I] = Op; |
9753 | } else { |
9754 | NewElts[I] = DAG.getNode(ISD::FCANONICALIZE, SL, EltVT, Op); |
9755 | } |
9756 | } |
9757 | |
9758 | |
9759 | |
9760 | |
9761 | if (NewElts[0].isUndef()) { |
9762 | if (isa<ConstantFPSDNode>(NewElts[1])) |
9763 | NewElts[0] = isa<ConstantFPSDNode>(NewElts[1]) ? |
9764 | NewElts[1]: DAG.getConstantFP(0.0f, SL, EltVT); |
9765 | } |
9766 | |
9767 | if (NewElts[1].isUndef()) { |
9768 | NewElts[1] = isa<ConstantFPSDNode>(NewElts[0]) ? |
9769 | NewElts[0] : DAG.getConstantFP(0.0f, SL, EltVT); |
9770 | } |
9771 | |
9772 | return DAG.getBuildVector(VT, SL, NewElts); |
9773 | } |
9774 | } |
9775 | |
9776 | unsigned SrcOpc = N0.getOpcode(); |
9777 | |
9778 | |
9779 | |
9780 | |
9781 | |
9782 | |
9783 | if (SrcOpc == ISD::FMINNUM || SrcOpc == ISD::FMAXNUM) { |
9784 | auto *CRHS = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); |
9785 | if (CRHS && N0.hasOneUse()) { |
9786 | SDLoc SL(N); |
9787 | SDValue Canon0 = DAG.getNode(ISD::FCANONICALIZE, SL, VT, |
9788 | N0.getOperand(0)); |
9789 | SDValue Canon1 = getCanonicalConstantFP(DAG, SL, VT, CRHS->getValueAPF()); |
9790 | DCI.AddToWorklist(Canon0.getNode()); |
9791 | |
9792 | return DAG.getNode(N0.getOpcode(), SL, VT, Canon0, Canon1); |
9793 | } |
9794 | } |
9795 | |
9796 | return isCanonicalized(DAG, N0) ? N0 : SDValue(); |
9797 | } |
9798 | |
9799 | static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { |
9800 | switch (Opc) { |
9801 | case ISD::FMAXNUM: |
9802 | case ISD::FMAXNUM_IEEE: |
9803 | return AMDGPUISD::FMAX3; |
9804 | case ISD::SMAX: |
9805 | return AMDGPUISD::SMAX3; |
9806 | case ISD::UMAX: |
9807 | return AMDGPUISD::UMAX3; |
9808 | case ISD::FMINNUM: |
9809 | case ISD::FMINNUM_IEEE: |
9810 | return AMDGPUISD::FMIN3; |
9811 | case ISD::SMIN: |
9812 | return AMDGPUISD::SMIN3; |
9813 | case ISD::UMIN: |
9814 | return AMDGPUISD::UMIN3; |
9815 | default: |
9816 | llvm_unreachable("Not a min/max opcode"); |
9817 | } |
9818 | } |
9819 | |
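      | // Illustrative example (not in the original source): with K0 < K1,
      | // min(max(x, K0), K1) clamps x into [K0, K1], which is exactly
      | // med3(x, K0, K1) -- e.g. smin(smax(x, -5), 7) == smed3(x, -5, 7).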
9820 | SDValue SITargetLowering::performIntMed3ImmCombine( |
9821 | SelectionDAG &DAG, const SDLoc &SL, |
9822 | SDValue Op0, SDValue Op1, bool Signed) const { |
9823 | ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1); |
9824 | if (!K1) |
9825 | return SDValue(); |
9826 | |
9827 | ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1)); |
9828 | if (!K0) |
9829 | return SDValue(); |
9830 | |
9831 | if (Signed) { |
9832 | if (K0->getAPIntValue().sge(K1->getAPIntValue())) |
9833 | return SDValue(); |
9834 | } else { |
9835 | if (K0->getAPIntValue().uge(K1->getAPIntValue())) |
9836 | return SDValue(); |
9837 | } |
9838 | |
9839 | EVT VT = K0->getValueType(0); |
9840 | unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3; |
9841 | if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) { |
9842 | return DAG.getNode(Med3Opc, SL, VT, |
9843 | Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0)); |
9844 | } |
9845 | |
9846 | |
9847 | if (VT == MVT::i16) { |
9848 | MVT NVT = MVT::i32; |
9849 | unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
9850 | |
9851 | SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0)); |
9852 | SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1)); |
9853 | SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1); |
9854 | |
9855 | SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3); |
9856 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3); |
9857 | } |
9858 | |
9859 | return SDValue(); |
9860 | } |
9861 | |
9862 | static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { |
9863 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) |
9864 | return C; |
9865 | |
9866 | if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) { |
9867 | if (ConstantFPSDNode *C = BV->getConstantFPSplatNode()) |
9868 | return C; |
9869 | } |
9870 | |
9871 | return nullptr; |
9872 | } |
9873 | |
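      | // As in the integer case, fminnum(fmaxnum(x, K0), K1) with K0 <= K1 is a
      | // clamp; K0 == 0.0 with K1 == 1.0 under DX10Clamp becomes AMDGPUISD::CLAMP,
      | // otherwise FMED3 is used once x is known not to be a signaling NaN.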
9874 | SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG, |
9875 | const SDLoc &SL, |
9876 | SDValue Op0, |
9877 | SDValue Op1) const { |
9878 | ConstantFPSDNode *K1 = getSplatConstantFP(Op1); |
9879 | if (!K1) |
9880 | return SDValue(); |
9881 | |
9882 | ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1)); |
9883 | if (!K0) |
9884 | return SDValue(); |
9885 | |
9886 | |
9887 | if (K0->getValueAPF() > K1->getValueAPF()) |
9888 | return SDValue(); |
9889 | |
9890 | const MachineFunction &MF = DAG.getMachineFunction(); |
9891 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
9892 | |
9893 | |
9894 | EVT VT = Op0.getValueType(); |
9895 | if (Info->getMode().DX10Clamp) { |
9896 | |
9897 | |
9898 | |
9899 | if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0)) |
9900 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0)); |
9901 | } |
9902 | |
9903 | |
9904 | if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) { |
9905 | |
9906 | |
9907 | |
9908 | |
9909 | SDValue Var = Op0.getOperand(0); |
9910 | if (!DAG.isKnownNeverSNaN(Var)) |
9911 | return SDValue(); |
9912 | |
9913 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
9914 | |
9915 | if ((!K0->hasOneUse() || |
9916 | TII->isInlineConstant(K0->getValueAPF().bitcastToAPInt())) && |
9917 | (!K1->hasOneUse() || |
9918 | TII->isInlineConstant(K1->getValueAPF().bitcastToAPInt()))) { |
9919 | return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0), |
9920 | Var, SDValue(K0, 0), SDValue(K1, 0)); |
9921 | } |
9922 | } |
9923 | |
9924 | return SDValue(); |
9925 | } |
9926 | |
9927 | SDValue SITargetLowering::performMinMaxCombine(SDNode *N, |
9928 | DAGCombinerInfo &DCI) const { |
9929 | SelectionDAG &DAG = DCI.DAG; |
9930 | |
9931 | EVT VT = N->getValueType(0); |
9932 | unsigned Opc = N->getOpcode(); |
9933 | SDValue Op0 = N->getOperand(0); |
9934 | SDValue Op1 = N->getOperand(1); |
9935 | |
9936 | |
9937 | |
9938 | |
9939 | if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && |
9940 | !VT.isVector() && |
9941 | (VT == MVT::i32 || VT == MVT::f32 || |
9942 | ((VT == MVT::f16 || VT == MVT::i16) && Subtarget->hasMin3Max3_16()))) { |
9943 | |
9944 | |
9945 | if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { |
9946 | SDLoc DL(N); |
9947 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
9948 | DL, |
9949 | N->getValueType(0), |
9950 | Op0.getOperand(0), |
9951 | Op0.getOperand(1), |
9952 | Op1); |
9953 | } |
9954 | |
9955 | |
9956 | |
9957 | |
9958 | if (Op1.getOpcode() == Opc && Op1.hasOneUse()) { |
9959 | SDLoc DL(N); |
9960 | return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc), |
9961 | DL, |
9962 | N->getValueType(0), |
9963 | Op0, |
9964 | Op1.getOperand(0), |
9965 | Op1.getOperand(1)); |
9966 | } |
9967 | } |
9968 | |
9969 | |
9970 | if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) { |
9971 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true)) |
9972 | return Med3; |
9973 | } |
9974 | |
9975 | if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { |
9976 | if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false)) |
9977 | return Med3; |
9978 | } |
9979 | |
9980 | |
9981 | if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) || |
9982 | (Opc == ISD::FMINNUM_IEEE && Op0.getOpcode() == ISD::FMAXNUM_IEEE) || |
9983 | (Opc == AMDGPUISD::FMIN_LEGACY && |
9984 | Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) && |
9985 | (VT == MVT::f32 || VT == MVT::f64 || |
9986 | (VT == MVT::f16 && Subtarget->has16BitInsts()) || |
9987 | (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) && |
9988 | Op0.hasOneUse()) { |
9989 | if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1)) |
9990 | return Res; |
9991 | } |
9992 | |
9993 | return SDValue(); |
9994 | } |
9995 | |
9996 | static bool isClampZeroToOne(SDValue A, SDValue B) { |
9997 | if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) { |
9998 | if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) { |
9999 | |
10000 | return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) || |
10001 | (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0)); |
10002 | } |
10003 | } |
10004 | |
10005 | return false; |
10006 | } |
10007 | |
10008 | |
10009 | SDValue SITargetLowering::performFMed3Combine(SDNode *N, |
10010 | DAGCombinerInfo &DCI) const { |
10011 | EVT VT = N->getValueType(0); |
10012 | |
10013 | |
10014 | |
10015 | SelectionDAG &DAG = DCI.DAG; |
10016 | SDLoc SL(N); |
10017 | |
10018 | SDValue Src0 = N->getOperand(0); |
10019 | SDValue Src1 = N->getOperand(1); |
10020 | SDValue Src2 = N->getOperand(2); |
10021 | |
10022 | if (isClampZeroToOne(Src0, Src1)) { |
10023 | |
10024 | |
10025 | |
10026 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2); |
10027 | } |
10028 | |
10029 | const MachineFunction &MF = DAG.getMachineFunction(); |
10030 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
10031 | |
10032 | |
10033 | |
10034 | if (Info->getMode().DX10Clamp) { |
10035 | |
10036 | |
10037 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
10038 | std::swap(Src0, Src1); |
10039 | |
10040 | if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2)) |
10041 | std::swap(Src1, Src2); |
10042 | |
10043 | if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1)) |
10044 | std::swap(Src0, Src1); |
10045 | |
10046 | if (isClampZeroToOne(Src1, Src2)) |
10047 | return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0); |
10048 | } |
10049 | |
10050 | return SDValue(); |
10051 | } |
10052 | |
10053 | SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N, |
10054 | DAGCombinerInfo &DCI) const { |
10055 | SDValue Src0 = N->getOperand(0); |
10056 | SDValue Src1 = N->getOperand(1); |
10057 | if (Src0.isUndef() && Src1.isUndef()) |
10058 | return DCI.DAG.getUNDEF(N->getValueType(0)); |
10059 | return SDValue(); |
10060 | } |
10061 | |
10062 | |
10063 | |
10064 | bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize, |
10065 | unsigned NumElem, |
10066 | bool IsDivergentIdx) { |
10067 | if (UseDivergentRegisterIndexing) |
10068 | return false; |
10069 | |
10070 | unsigned VecSize = EltSize * NumElem; |
10071 | |
10072 | // Sub-dword vectors of two dwords or less have a better implementation. |
10073 | if (VecSize <= 64 && EltSize < 32) |
10074 | return false; |
10075 | |
10076 | // Always expand the remaining sub-dword cases; otherwise they are |
10077 | // lowered through memory. |
10078 | if (EltSize < 32) |
10079 | return true; |
10080 | |
10081 | // Always expand when the index is divergent; indexing would need a loop. |
10082 | if (IsDivergentIdx) |
10083 | return true; |
10084 | |
10085 | // Estimate one v_cmp per element plus one v_cndmask per 32-bit chunk. |
10086 | unsigned NumInsts = NumElem + |
10087 | ((EltSize + 31) / 32) * NumElem; |
10088 | return NumInsts <= 16; |
10089 | } |
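      | // Illustrative example (not in the original source): a uniform index
      | // into <8 x i32> gives NumInsts = 8 + ((32 + 31) / 32) * 8 = 16, which
      | // is still expanded; <16 x i32> gives 32 and keeps dynamic indexing.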
10090 | |
10091 | static bool shouldExpandVectorDynExt(SDNode *N) { |
10092 | SDValue Idx = N->getOperand(N->getNumOperands() - 1); |
10093 | if (isa<ConstantSDNode>(Idx)) |
10094 | return false; |
10095 | |
10096 | SDValue Vec = N->getOperand(0); |
10097 | EVT VecVT = Vec.getValueType(); |
10098 | EVT EltVT = VecVT.getVectorElementType(); |
10099 | unsigned EltSize = EltVT.getSizeInBits(); |
10100 | unsigned NumElem = VecVT.getVectorNumElements(); |
10101 | |
10102 | return SITargetLowering::shouldExpandVectorDynExt(EltSize, NumElem, |
10103 | Idx->isDivergent()); |
10104 | } |
10105 | |
10106 | SDValue SITargetLowering::performExtractVectorEltCombine( |
10107 | SDNode *N, DAGCombinerInfo &DCI) const { |
10108 | SDValue Vec = N->getOperand(0); |
10109 | SelectionDAG &DAG = DCI.DAG; |
10110 | |
10111 | EVT VecVT = Vec.getValueType(); |
10112 | EVT EltVT = VecVT.getVectorElementType(); |
10113 | |
10114 | if ((Vec.getOpcode() == ISD::FNEG || |
10115 | Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) { |
10116 | SDLoc SL(N); |
10117 | EVT EltVT = N->getValueType(0); |
10118 | SDValue Idx = N->getOperand(1); |
10119 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
10120 | Vec.getOperand(0), Idx); |
10121 | return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt); |
10122 | } |
10123 | |
10124 | |
10125 | |
10126 | |
10127 | |
10128 | |
10129 | if (Vec.hasOneUse() && DCI.isBeforeLegalize()) { |
10130 | SDLoc SL(N); |
10131 | EVT EltVT = N->getValueType(0); |
10132 | SDValue Idx = N->getOperand(1); |
10133 | unsigned Opc = Vec.getOpcode(); |
10134 | |
10135 | switch(Opc) { |
10136 | default: |
10137 | break; |
10138 | |
10139 | case ISD::FADD: |
10140 | case ISD::FSUB: |
10141 | case ISD::FMUL: |
10142 | case ISD::ADD: |
10143 | case ISD::UMIN: |
10144 | case ISD::UMAX: |
10145 | case ISD::SMIN: |
10146 | case ISD::SMAX: |
10147 | case ISD::FMAXNUM: |
10148 | case ISD::FMINNUM: |
10149 | case ISD::FMAXNUM_IEEE: |
10150 | case ISD::FMINNUM_IEEE: { |
10151 | SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
10152 | Vec.getOperand(0), Idx); |
10153 | SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, |
10154 | Vec.getOperand(1), Idx); |
10155 | |
10156 | DCI.AddToWorklist(Elt0.getNode()); |
10157 | DCI.AddToWorklist(Elt1.getNode()); |
10158 | return DAG.getNode(Opc, SL, EltVT, Elt0, Elt1, Vec->getFlags()); |
10159 | } |
10160 | } |
10161 | } |
10162 | |
10163 | unsigned VecSize = VecVT.getSizeInBits(); |
10164 | unsigned EltSize = EltVT.getSizeInBits(); |
10165 | |
10166 | |
10167 | if (::shouldExpandVectorDynExt(N)) { |
10168 | SDLoc SL(N); |
10169 | SDValue Idx = N->getOperand(1); |
10170 | SDValue V; |
10171 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
10172 | SDValue IC = DAG.getVectorIdxConstant(I, SL); |
10173 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
10174 | if (I == 0) |
10175 | V = Elt; |
10176 | else |
10177 | V = DAG.getSelectCC(SL, Idx, IC, Elt, V, ISD::SETEQ); |
10178 | } |
10179 | return V; |
10180 | } |
10181 | |
10182 | if (!DCI.isBeforeLegalize()) |
10183 | return SDValue(); |
10184 | |
10185 | |
10186 | |
10187 | |
10188 | auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
10189 | if (isa<MemSDNode>(Vec) && |
10190 | EltSize <= 16 && |
10191 | EltVT.isByteSized() && |
10192 | VecSize > 32 && |
10193 | VecSize % 32 == 0 && |
10194 | Idx) { |
10195 | EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT); |
10196 | |
10197 | unsigned BitIndex = Idx->getZExtValue() * EltSize; |
10198 | unsigned EltIdx = BitIndex / 32; |
10199 | unsigned LeftoverBitIdx = BitIndex % 32; |
10200 | SDLoc SL(N); |
10201 | |
10202 | SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec); |
10203 | DCI.AddToWorklist(Cast.getNode()); |
10204 | |
10205 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast, |
10206 | DAG.getConstant(EltIdx, SL, MVT::i32)); |
10207 | DCI.AddToWorklist(Elt.getNode()); |
10208 | SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt, |
10209 | DAG.getConstant(LeftoverBitIdx, SL, MVT::i32)); |
10210 | DCI.AddToWorklist(Srl.getNode()); |
10211 | |
10212 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, EltVT.changeTypeToInteger(), Srl); |
10213 | DCI.AddToWorklist(Trunc.getNode()); |
10214 | return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc); |
10215 | } |
10216 | |
10217 | return SDValue(); |
10218 | } |
10219 | |
10220 | SDValue |
10221 | SITargetLowering::performInsertVectorEltCombine(SDNode *N, |
10222 | DAGCombinerInfo &DCI) const { |
10223 | SDValue Vec = N->getOperand(0); |
10224 | SDValue Idx = N->getOperand(2); |
10225 | EVT VecVT = Vec.getValueType(); |
10226 | EVT EltVT = VecVT.getVectorElementType(); |
10227 | |
10228 | |
10229 | |
10230 | if (!::shouldExpandVectorDynExt(N)) |
10231 | return SDValue(); |
10232 | |
10233 | SelectionDAG &DAG = DCI.DAG; |
10234 | SDLoc SL(N); |
10235 | SDValue Ins = N->getOperand(1); |
10236 | EVT IdxVT = Idx.getValueType(); |
10237 | |
10238 | SmallVector<SDValue, 16> Ops; |
10239 | for (unsigned I = 0, E = VecVT.getVectorNumElements(); I < E; ++I) { |
10240 | SDValue IC = DAG.getConstant(I, SL, IdxVT); |
10241 | SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Vec, IC); |
10242 | SDValue V = DAG.getSelectCC(SL, Idx, IC, Ins, Elt, ISD::SETEQ); |
10243 | Ops.push_back(V); |
10244 | } |
10245 | |
10246 | return DAG.getBuildVector(VecVT, SL, Ops); |
10247 | } |
10248 | |
10249 | unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG, |
10250 | const SDNode *N0, |
10251 | const SDNode *N1) const { |
10252 | EVT VT = N0->getValueType(0); |
10253 | |
10254 | |
10255 | |
10256 | if (((VT == MVT::f32 && !hasFP32Denormals(DAG.getMachineFunction())) || |
10257 | (VT == MVT::f16 && !hasFP64FP16Denormals(DAG.getMachineFunction()) && |
10258 | getSubtarget()->hasMadF16())) && |
10259 | isOperationLegal(ISD::FMAD, VT)) |
10260 | return ISD::FMAD; |
10261 | |
10262 | const TargetOptions &Options = DAG.getTarget().Options; |
10263 | if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
10264 | (N0->getFlags().hasAllowContract() && |
10265 | N1->getFlags().hasAllowContract())) && |
10266 | isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { |
10267 | return ISD::FMA; |
10268 | } |
10269 | |
10270 | return 0; |
10271 | } |
10272 | |
10273 | // Reassociate op x, (op y, z) -> op (op x, y), z so that the two |
10274 | // uniform operands pair up first and the divergent one stays last. |
10275 | SDValue SITargetLowering::reassociateScalarOps(SDNode *N, |
10276 | SelectionDAG &DAG) const { |
10277 | EVT VT = N->getValueType(0); |
10278 | if (VT != MVT::i32 && VT != MVT::i64) |
10279 | return SDValue(); |
10280 | |
10281 | unsigned Opc = N->getOpcode(); |
10282 | SDValue Op0 = N->getOperand(0); |
10283 | SDValue Op1 = N->getOperand(1); |
10284 | |
10285 | if (!(Op0->isDivergent() ^ Op1->isDivergent())) |
10286 | return SDValue(); |
10287 | |
10288 | if (Op0->isDivergent()) |
10289 | std::swap(Op0, Op1); |
10290 | |
10291 | if (Op1.getOpcode() != Opc || !Op1.hasOneUse()) |
10292 | return SDValue(); |
10293 | |
10294 | SDValue Op2 = Op1.getOperand(1); |
10295 | Op1 = Op1.getOperand(0); |
10296 | if (!(Op1->isDivergent() ^ Op2->isDivergent())) |
10297 | return SDValue(); |
10298 | |
10299 | if (Op1->isDivergent()) |
10300 | std::swap(Op1, Op2); |
10301 | |
10302 | |
10303 | |
10304 | if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) || |
10305 | DAG.isConstantIntBuildVectorOrConstantInt(Op1)) |
10306 | return SDValue(); |
10307 | |
10308 | SDLoc SL(N); |
10309 | SDValue Add1 = DAG.getNode(Opc, SL, VT, Op0, Op1); |
10310 | return DAG.getNode(Opc, SL, VT, Add1, Op2); |
10311 | } |
10312 | |
10313 | static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL, |
10314 | EVT VT, |
10315 | SDValue N0, SDValue N1, SDValue N2, |
10316 | bool Signed) { |
10317 | unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32; |
10318 | SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1); |
10319 | SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2); |
10320 | return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad); |
10321 | } |
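      | // getMad64_32 builds v_mad_i64_i32 / v_mad_u64_u32: a full 64-bit
      | // product of two 32-bit values plus a 64-bit addend, then truncates
      | // the i64 result back to the add's original type.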
10322 | |
10323 | SDValue SITargetLowering::performAddCombine(SDNode *N, |
10324 | DAGCombinerInfo &DCI) const { |
10325 | SelectionDAG &DAG = DCI.DAG; |
10326 | EVT VT = N->getValueType(0); |
10327 | SDLoc SL(N); |
10328 | SDValue LHS = N->getOperand(0); |
10329 | SDValue RHS = N->getOperand(1); |
10330 | |
10331 | if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL) |
10332 | && Subtarget->hasMad64_32() && |
10333 | !VT.isVector() && VT.getScalarSizeInBits() > 32 && |
10334 | VT.getScalarSizeInBits() <= 64) { |
10335 | if (LHS.getOpcode() != ISD::MUL) |
10336 | std::swap(LHS, RHS); |
10337 | |
10338 | SDValue MulLHS = LHS.getOperand(0); |
10339 | SDValue MulRHS = LHS.getOperand(1); |
10340 | SDValue AddRHS = RHS; |
10341 | |
10342 | |
10343 | if (numBitsUnsigned(MulLHS, DAG) <= 32 && |
10344 | numBitsUnsigned(MulRHS, DAG) <= 32) { |
10345 | MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32); |
10346 | MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32); |
10347 | AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64); |
10348 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false); |
10349 | } |
10350 | |
10351 | if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) { |
10352 | MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32); |
10353 | MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32); |
10354 | AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64); |
10355 | return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true); |
10356 | } |
10357 | |
10358 | return SDValue(); |
10359 | } |
10360 | |
10361 | if (SDValue V = reassociateScalarOps(N, DAG)) { |
10362 | return V; |
10363 | } |
10364 | |
10365 | if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG()) |
10366 | return SDValue(); |
10367 | |
10368 | |
10369 | |
10370 | unsigned Opc = LHS.getOpcode(); |
10371 | if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND || |
10372 | Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY) |
10373 | std::swap(RHS, LHS); |
10374 | |
10375 | Opc = RHS.getOpcode(); |
10376 | switch (Opc) { |
10377 | default: break; |
10378 | case ISD::ZERO_EXTEND: |
10379 | case ISD::SIGN_EXTEND: |
10380 | case ISD::ANY_EXTEND: { |
10381 | auto Cond = RHS.getOperand(0); |
10382 | |
10383 | |
10384 | if (!isBoolSGPR(Cond)) |
10385 | break; |
10386 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
10387 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
10388 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY; |
10389 | return DAG.getNode(Opc, SL, VTList, Args); |
10390 | } |
10391 | case ISD::ADDCARRY: { |
10392 | |
10393 | auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); |
10394 | if (!C || C->getZExtValue() != 0) break; |
10395 | SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) }; |
10396 | return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args); |
10397 | } |
10398 | } |
10399 | return SDValue(); |
10400 | } |
10401 | |
10402 | SDValue SITargetLowering::performSubCombine(SDNode *N, |
10403 | DAGCombinerInfo &DCI) const { |
10404 | SelectionDAG &DAG = DCI.DAG; |
10405 | EVT VT = N->getValueType(0); |
10406 | |
10407 | if (VT != MVT::i32) |
10408 | return SDValue(); |
10409 | |
10410 | SDLoc SL(N); |
10411 | SDValue LHS = N->getOperand(0); |
10412 | SDValue RHS = N->getOperand(1); |
10413 | |
10414 | |
10415 | |
10416 | unsigned Opc = RHS.getOpcode(); |
10417 | switch (Opc) { |
10418 | default: break; |
10419 | case ISD::ZERO_EXTEND: |
10420 | case ISD::SIGN_EXTEND: |
10421 | case ISD::ANY_EXTEND: { |
10422 | auto Cond = RHS.getOperand(0); |
10423 | |
10424 | |
10425 | if (!isBoolSGPR(Cond)) |
10426 | break; |
10427 | SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1); |
10428 | SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond }; |
10429 | Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::ADDCARRY : ISD::SUBCARRY; |
10430 | return DAG.getNode(Opc, SL, VTList, Args); |
10431 | } |
10432 | } |
10433 | |
10434 | if (LHS.getOpcode() == ISD::SUBCARRY) { |
10435 | |
10436 | auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1)); |
10437 | if (!C || !C->isNullValue()) |
10438 | return SDValue(); |
10439 | SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) }; |
10440 | return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args); |
10441 | } |
10442 | return SDValue(); |
10443 | } |
10444 | |
10445 | SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N, |
10446 | DAGCombinerInfo &DCI) const { |
10447 | |
10448 | if (N->getValueType(0) != MVT::i32) |
10449 | return SDValue(); |
10450 | |
10451 | auto C = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
10452 | if (!C || C->getZExtValue() != 0) |
10453 | return SDValue(); |
10454 | |
10455 | SelectionDAG &DAG = DCI.DAG; |
10456 | SDValue LHS = N->getOperand(0); |
10457 | |
10458 | |
10459 | |
10460 | unsigned LHSOpc = LHS.getOpcode(); |
10461 | unsigned Opc = N->getOpcode(); |
10462 | if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) || |
10463 | (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) { |
10464 | SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) }; |
10465 | return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args); |
10466 | } |
10467 | return SDValue(); |
10468 | } |
10469 | |
10470 | SDValue SITargetLowering::performFAddCombine(SDNode *N, |
10471 | DAGCombinerInfo &DCI) const { |
10472 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
10473 | return SDValue(); |
10474 | |
10475 | SelectionDAG &DAG = DCI.DAG; |
10476 | EVT VT = N->getValueType(0); |
10477 | |
10478 | SDLoc SL(N); |
10479 | SDValue LHS = N->getOperand(0); |
10480 | SDValue RHS = N->getOperand(1); |
10481 | |
10482 | |
10483 | |
10484 | |
10485 | |
10486 | if (LHS.getOpcode() == ISD::FADD) { |
10487 | SDValue A = LHS.getOperand(0); |
10488 | if (A == LHS.getOperand(1)) { |
10489 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
10490 | if (FusedOp != 0) { |
10491 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
10492 | return DAG.getNode(FusedOp, SL, VT, A, Two, RHS); |
10493 | } |
10494 | } |
10495 | } |
10496 | |
10497 | |
10498 | if (RHS.getOpcode() == ISD::FADD) { |
10499 | SDValue A = RHS.getOperand(0); |
10500 | if (A == RHS.getOperand(1)) { |
10501 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
10502 | if (FusedOp != 0) { |
10503 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
10504 | return DAG.getNode(FusedOp, SL, VT, A, Two, LHS); |
10505 | } |
10506 | } |
10507 | } |
10508 | |
10509 | return SDValue(); |
10510 | } |
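      | // Both patterns above rewrite (x + x) + y as fma/fmad(x, 2.0, y) when
      | // getFusedOpcode decides fusion is legal and profitable here.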
10511 | |
10512 | SDValue SITargetLowering::performFSubCombine(SDNode *N, |
10513 | DAGCombinerInfo &DCI) const { |
10514 | if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) |
10515 | return SDValue(); |
10516 | |
10517 | SelectionDAG &DAG = DCI.DAG; |
10518 | SDLoc SL(N); |
10519 | EVT VT = N->getValueType(0); |
10520 | assert(!VT.isVector()); |
10521 | |
10522 | |
10523 | |
10524 | |
10525 | |
10526 | |
10527 | SDValue LHS = N->getOperand(0); |
10528 | SDValue RHS = N->getOperand(1); |
10529 | if (LHS.getOpcode() == ISD::FADD) { |
10530 | |
10531 | SDValue A = LHS.getOperand(0); |
10532 | if (A == LHS.getOperand(1)) { |
10533 | unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode()); |
10534 | if (FusedOp != 0){ |
10535 | const SDValue Two = DAG.getConstantFP(2.0, SL, VT); |
10536 | SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); |
10537 | |
10538 | return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS); |
10539 | } |
10540 | } |
10541 | } |
10542 | |
10543 | if (RHS.getOpcode() == ISD::FADD) { |
10544 | |
10545 | |
10546 | SDValue A = RHS.getOperand(0); |
10547 | if (A == RHS.getOperand(1)) { |
10548 | unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode()); |
10549 | if (FusedOp != 0){ |
10550 | const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT); |
10551 | return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS); |
10552 | } |
10553 | } |
10554 | } |
10555 | |
10556 | return SDValue(); |
10557 | } |
10558 | |
10559 | SDValue SITargetLowering::performFMACombine(SDNode *N, |
10560 | DAGCombinerInfo &DCI) const { |
10561 | SelectionDAG &DAG = DCI.DAG; |
10562 | EVT VT = N->getValueType(0); |
10563 | SDLoc SL(N); |
10564 | |
10565 | if (!Subtarget->hasDot7Insts() || VT != MVT::f32) |
10566 | return SDValue(); |
10567 | |
10568 | |
10569 | |
10570 | SDValue Op1 = N->getOperand(0); |
10571 | SDValue Op2 = N->getOperand(1); |
10572 | SDValue FMA = N->getOperand(2); |
10573 | |
10574 | if (FMA.getOpcode() != ISD::FMA || |
10575 | Op1.getOpcode() != ISD::FP_EXTEND || |
10576 | Op2.getOpcode() != ISD::FP_EXTEND) |
10577 | return SDValue(); |
10578 | |
10579 | |
10580 | |
10581 | |
10582 | const TargetOptions &Options = DAG.getTarget().Options; |
10583 | if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath || |
10584 | (N->getFlags().hasAllowContract() && |
10585 | FMA->getFlags().hasAllowContract())) { |
10586 | Op1 = Op1.getOperand(0); |
10587 | Op2 = Op2.getOperand(0); |
10588 | if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
10589 | Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
10590 | return SDValue(); |
10591 | |
10592 | SDValue Vec1 = Op1.getOperand(0); |
10593 | SDValue Idx1 = Op1.getOperand(1); |
10594 | SDValue Vec2 = Op2.getOperand(0); |
10595 | |
10596 | SDValue FMAOp1 = FMA.getOperand(0); |
10597 | SDValue FMAOp2 = FMA.getOperand(1); |
10598 | SDValue FMAAcc = FMA.getOperand(2); |
10599 | |
10600 | if (FMAOp1.getOpcode() != ISD::FP_EXTEND || |
10601 | FMAOp2.getOpcode() != ISD::FP_EXTEND) |
10602 | return SDValue(); |
10603 | |
10604 | FMAOp1 = FMAOp1.getOperand(0); |
10605 | FMAOp2 = FMAOp2.getOperand(0); |
10606 | if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || |
10607 | FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT) |
10608 | return SDValue(); |
10609 | |
10610 | SDValue Vec3 = FMAOp1.getOperand(0); |
10611 | SDValue Vec4 = FMAOp2.getOperand(0); |
10612 | SDValue Idx2 = FMAOp1.getOperand(1); |
10613 | |
10614 | if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) || |
10615 | |
10616 | Idx1 == Idx2) |
10617 | return SDValue(); |
10618 | |
10619 | if (Vec1 == Vec2 || Vec3 == Vec4) |
10620 | return SDValue(); |
10621 | |
10622 | if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16) |
10623 | return SDValue(); |
10624 | |
10625 | if ((Vec1 == Vec3 && Vec2 == Vec4) || |
10626 | (Vec1 == Vec4 && Vec2 == Vec3)) { |
10627 | return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc, |
10628 | DAG.getTargetConstant(0, SL, MVT::i1)); |
10629 | } |
10630 | } |
10631 | return SDValue(); |
10632 | } |
10633 | |
10634 | SDValue SITargetLowering::performSetCCCombine(SDNode *N, |
10635 | DAGCombinerInfo &DCI) const { |
10636 | SelectionDAG &DAG = DCI.DAG; |
10637 | SDLoc SL(N); |
10638 | |
10639 | SDValue LHS = N->getOperand(0); |
10640 | SDValue RHS = N->getOperand(1); |
10641 | EVT VT = LHS.getValueType(); |
10642 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); |
10643 | |
10644 | auto CRHS = dyn_cast<ConstantSDNode>(RHS); |
10645 | if (!CRHS) { |
10646 | CRHS = dyn_cast<ConstantSDNode>(LHS); |
10647 | if (CRHS) { |
10648 | std::swap(LHS, RHS); |
10649 | CC = getSetCCSwappedOperands(CC); |
10650 | } |
10651 | } |
10652 | |
10653 | if (CRHS) { |
10654 | if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND && |
10655 | isBoolSGPR(LHS.getOperand(0))) { |
10656 | |
10657 | |
10658 | |
10659 | |
10660 | if ((CRHS->isAllOnesValue() && |
10661 | (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) || |
10662 | (CRHS->isNullValue() && |
10663 | (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE))) |
10664 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
10665 | DAG.getConstant(-1, SL, MVT::i1)); |
10666 | if ((CRHS->isAllOnesValue() && |
10667 | (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) || |
10668 | (CRHS->isNullValue() && |
10669 | (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT))) |
10670 | return LHS.getOperand(0); |
10671 | } |
10672 | |
10673 | uint64_t CRHSVal = CRHS->getZExtValue(); |
10674 | if ((CC == ISD::SETEQ || CC == ISD::SETNE) && |
10675 | LHS.getOpcode() == ISD::SELECT && |
10676 | isa<ConstantSDNode>(LHS.getOperand(1)) && |
10677 | isa<ConstantSDNode>(LHS.getOperand(2)) && |
10678 | LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) && |
10679 | isBoolSGPR(LHS.getOperand(0))) { |
10680 | |
10681 | |
10682 | |
10683 | |
10684 | |
10685 | uint64_t CT = LHS.getConstantOperandVal(1); |
10686 | uint64_t CF = LHS.getConstantOperandVal(2); |
10687 | |
10688 | if ((CF == CRHSVal && CC == ISD::SETEQ) || |
10689 | (CT == CRHSVal && CC == ISD::SETNE)) |
10690 | return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0), |
10691 | DAG.getConstant(-1, SL, MVT::i1)); |
10692 | if ((CF == CRHSVal && CC == ISD::SETNE) || |
10693 | (CT == CRHSVal && CC == ISD::SETEQ)) |
10694 | return LHS.getOperand(0); |
10695 | } |
10696 | } |
10697 | |
10698 | if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() && |
10699 | VT != MVT::f16)) |
10700 | return SDValue(); |
10701 | |
10702 | |
10703 | |
10704 | |
10705 | |
10706 | if ((CC == ISD::SETOEQ || CC == ISD::SETONE) && LHS.getOpcode() == ISD::FABS) { |
10707 | const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); |
10708 | if (!CRHS) |
10709 | return SDValue(); |
10710 | |
10711 | const APFloat &APF = CRHS->getValueAPF(); |
10712 | if (APF.isInfinity() && !APF.isNegative()) { |
10713 | const unsigned IsInfMask = SIInstrFlags::P_INFINITY | |
10714 | SIInstrFlags::N_INFINITY; |
10715 | const unsigned IsFiniteMask = SIInstrFlags::N_ZERO | |
10716 | SIInstrFlags::P_ZERO | |
10717 | SIInstrFlags::N_NORMAL | |
10718 | SIInstrFlags::P_NORMAL | |
10719 | SIInstrFlags::N_SUBNORMAL | |
10720 | SIInstrFlags::P_SUBNORMAL; |
10721 | unsigned Mask = CC == ISD::SETOEQ ? IsInfMask : IsFiniteMask; |
10722 | return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0), |
10723 | DAG.getConstant(Mask, SL, MVT::i32)); |
10724 | } |
10725 | } |
10726 | |
10727 | return SDValue(); |
10728 | } |
10729 | |
10730 | SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N, |
10731 | DAGCombinerInfo &DCI) const { |
10732 | SelectionDAG &DAG = DCI.DAG; |
10733 | SDLoc SL(N); |
10734 | unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0; |
10735 | |
10736 | SDValue Src = N->getOperand(0); |
10737 | SDValue Shift = N->getOperand(0); |
10738 | |
10739 | |
10740 | if (Shift.getOpcode() == ISD::ZERO_EXTEND) |
10741 | Shift = Shift.getOperand(0); |
10742 | |
10743 | if (Shift.getOpcode() == ISD::SRL || Shift.getOpcode() == ISD::SHL) { |
10744 | |
10745 | |
10746 | |
10747 | |
10748 | |
10749 | if (auto *C = dyn_cast<ConstantSDNode>(Shift.getOperand(1))) { |
10750 | Shift = DAG.getZExtOrTrunc(Shift.getOperand(0), |
10751 | SDLoc(Shift.getOperand(0)), MVT::i32); |
10752 | |
10753 | unsigned ShiftOffset = 8 * Offset; |
10754 | if (Shift.getOpcode() == ISD::SHL) |
10755 | ShiftOffset -= C->getZExtValue(); |
10756 | else |
10757 | ShiftOffset += C->getZExtValue(); |
10758 | |
10759 | if (ShiftOffset < 32 && (ShiftOffset % 8) == 0) { |
10760 | return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + ShiftOffset / 8, SL, |
10761 | MVT::f32, Shift); |
10762 | } |
10763 | } |
10764 | } |
10765 | |
10766 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
10767 | APInt DemandedBits = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8); |
10768 | if (TLI.SimplifyDemandedBits(Src, DemandedBits, DCI)) { |
10769 | |
10770 | |
10771 | if (N->getOpcode() != ISD::DELETED_NODE) |
10772 | DCI.AddToWorklist(N); |
10773 | return SDValue(N, 0); |
10774 | } |
10775 | |
10776 | |
10777 | if (SDValue DemandedSrc = |
10778 | TLI.SimplifyMultipleUseDemandedBits(Src, DemandedBits, DAG)) |
10779 | return DAG.getNode(N->getOpcode(), SL, MVT::f32, DemandedSrc); |
10780 | |
10781 | return SDValue(); |
10782 | } |
10783 | |
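      | // Constant-fold clamp(c): negative values (and NaN under DX10Clamp)
      | // become 0.0, values above 1.0 become 1.0, otherwise c is unchanged.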
10784 | SDValue SITargetLowering::performClampCombine(SDNode *N, |
10785 | DAGCombinerInfo &DCI) const { |
10786 | ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); |
10787 | if (!CSrc) |
10788 | return SDValue(); |
10789 | |
10790 | const MachineFunction &MF = DCI.DAG.getMachineFunction(); |
10791 | const APFloat &F = CSrc->getValueAPF(); |
10792 | APFloat Zero = APFloat::getZero(F.getSemantics()); |
10793 | if (F < Zero || |
10794 | (F.isNaN() && MF.getInfo<SIMachineFunctionInfo>()->getMode().DX10Clamp)) { |
10795 | return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); |
10796 | } |
10797 | |
10798 | APFloat One(F.getSemantics(), "1.0"); |
10799 | if (F > One) |
10800 | return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); |
10801 | |
10802 | return SDValue(CSrc, 0); |
10803 | } |
10804 | |
10805 | |
10806 | SDValue SITargetLowering::PerformDAGCombine(SDNode *N, |
10807 | DAGCombinerInfo &DCI) const { |
10808 | if (getTargetMachine().getOptLevel() == CodeGenOpt::None) |
      |   1 | Assuming the condition is false
10809 | return SDValue(); |
10810 | switch (N->getOpcode()) { |
      |   3 | Control jumps to 'case FCANONICALIZE:' at line 10849
10811 | case ISD::ADD: |
10812 | return performAddCombine(N, DCI); |
10813 | case ISD::SUB: |
10814 | return performSubCombine(N, DCI); |
10815 | case ISD::ADDCARRY: |
10816 | case ISD::SUBCARRY: |
10817 | return performAddCarrySubCarryCombine(N, DCI); |
10818 | case ISD::FADD: |
10819 | return performFAddCombine(N, DCI); |
10820 | case ISD::FSUB: |
10821 | return performFSubCombine(N, DCI); |
10822 | case ISD::SETCC: |
10823 | return performSetCCCombine(N, DCI); |
10824 | case ISD::FMAXNUM: |
10825 | case ISD::FMINNUM: |
10826 | case ISD::FMAXNUM_IEEE: |
10827 | case ISD::FMINNUM_IEEE: |
10828 | case ISD::SMAX: |
10829 | case ISD::SMIN: |
10830 | case ISD::UMAX: |
10831 | case ISD::UMIN: |
10832 | case AMDGPUISD::FMIN_LEGACY: |
10833 | case AMDGPUISD::FMAX_LEGACY: |
10834 | return performMinMaxCombine(N, DCI); |
10835 | case ISD::FMA: |
10836 | return performFMACombine(N, DCI); |
10837 | case ISD::AND: |
10838 | return performAndCombine(N, DCI); |
10839 | case ISD::OR: |
10840 | return performOrCombine(N, DCI); |
10841 | case ISD::XOR: |
10842 | return performXorCombine(N, DCI); |
10843 | case ISD::ZERO_EXTEND: |
10844 | return performZeroExtendCombine(N, DCI); |
10845 | case ISD::SIGN_EXTEND_INREG: |
10846 | return performSignExtendInRegCombine(N, DCI); |
10847 | case AMDGPUISD::FP_CLASS: |
10848 | return performClassCombine(N, DCI); |
10849 | case ISD::FCANONICALIZE: |
10850 | return performFCanonicalizeCombine(N, DCI); |
| 4 | Calling 'SITargetLowering::performFCanonicalizeCombine' |
10851 | case AMDGPUISD::RCP: |
10852 | return performRcpCombine(N, DCI); |
10853 | case AMDGPUISD::FRACT: |
10854 | case AMDGPUISD::RSQ: |
10855 | case AMDGPUISD::RCP_LEGACY: |
10856 | case AMDGPUISD::RCP_IFLAG: |
10857 | case AMDGPUISD::RSQ_CLAMP: |
10858 | case AMDGPUISD::LDEXP: { |
10859 | // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted |
10860 | SDValue Src = N->getOperand(0); |
10861 | if (Src.isUndef()) |
10862 | return Src; |
10863 | break; |
10864 | } |
10865 | case ISD::SINT_TO_FP: |
10866 | case ISD::UINT_TO_FP: |
10867 | return performUCharToFloatCombine(N, DCI); |
10868 | case AMDGPUISD::CVT_F32_UBYTE0: |
10869 | case AMDGPUISD::CVT_F32_UBYTE1: |
10870 | case AMDGPUISD::CVT_F32_UBYTE2: |
10871 | case AMDGPUISD::CVT_F32_UBYTE3: |
10872 | return performCvtF32UByteNCombine(N, DCI); |
10873 | case AMDGPUISD::FMED3: |
10874 | return performFMed3Combine(N, DCI); |
10875 | case AMDGPUISD::CVT_PKRTZ_F16_F32: |
10876 | return performCvtPkRTZCombine(N, DCI); |
10877 | case AMDGPUISD::CLAMP: |
10878 | return performClampCombine(N, DCI); |
10879 | case ISD::SCALAR_TO_VECTOR: { |
10880 | SelectionDAG &DAG = DCI.DAG; |
10881 | EVT VT = N->getValueType(0); |
10882 | |
10883 | // v2i16 (scalar_to_vector x) -> v2i16 (bitcast (any_extend x)) |
10884 | if (VT == MVT::v2i16 || VT == MVT::v2f16) { |
10885 | SDLoc SL(N); |
10886 | SDValue Src = N->getOperand(0); |
10887 | EVT EltVT = Src.getValueType(); |
10888 | if (EltVT == MVT::f16) |
10889 | Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); |
10890 | |
10891 | SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); |
10892 | return DAG.getNode(ISD::BITCAST, SL, VT, Ext); |
10893 | } |
10894 | |
10895 | break; |
10896 | } |
10897 | case ISD::EXTRACT_VECTOR_ELT: |
10898 | return performExtractVectorEltCombine(N, DCI); |
10899 | case ISD::INSERT_VECTOR_ELT: |
10900 | return performInsertVectorEltCombine(N, DCI); |
10901 | case ISD::LOAD: { |
10902 | if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI)) |
10903 | return Widened; |
10904 | LLVM_FALLTHROUGH; |
10905 | } |
10906 | default: { |
10907 | if (!DCI.isBeforeLegalize()) { |
10908 | if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N)) |
10909 | return performMemSDNodeCombine(MemNode, DCI); |
10910 | } |
10911 | |
10912 | break; |
10913 | } |
10914 | } |
10915 | |
10916 | return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); |
10917 | } |
10918 | |
10919 | /// Helper function for adjustWritemask |
10920 | static unsigned SubIdx2Lane(unsigned Idx) { |
10921 | switch (Idx) { |
10922 | default: return ~0u; |
10923 | case AMDGPU::sub0: return 0; |
10924 | case AMDGPU::sub1: return 1; |
10925 | case AMDGPU::sub2: return 2; |
10926 | case AMDGPU::sub3: return 3; |
10927 | case AMDGPU::sub4: return 4; |
10928 | } |
10929 | } |
10930 | |
10931 | /// Adjust the writemask of MIMG instructions |
10932 | SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, |
10933 | SelectionDAG &DAG) const { |
10934 | unsigned Opcode = Node->getMachineOpcode(); |
10935 | |
10936 | // Subtract 1 because the vdata output is not a MachineSDNode operand. |
10937 | int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1; |
10938 | if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx)) |
10939 | return Node; |
10940 | |
10941 | SDNode *Users[5] = { nullptr }; |
10942 | unsigned Lane = 0; |
10943 | unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1; |
10944 | unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx); |
10945 | unsigned NewDmask = 0; |
10946 | unsigned TFEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::tfe) - 1; |
10947 | unsigned LWEIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::lwe) - 1; |
10948 | bool UsesTFC = (int(TFEIdx) >= 0 && Node->getConstantOperandVal(TFEIdx)) || |
10949 | Node->getConstantOperandVal(LWEIdx); |
10950 | unsigned TFCLane = 0; |
10951 | bool HasChain = Node->getNumValues() > 1; |
10952 | |
10953 | if (OldDmask == 0) { |
10954 | // These are folded out, but on the chance it happens don't assert. |
10955 | return Node; |
10956 | } |
10957 | |
10958 | unsigned OldBitsSet = countPopulation(OldDmask); |
10959 | // Work out which is the TFE/LWE lane if that is enabled. |
10960 | if (UsesTFC) { |
10961 | TFCLane = OldBitsSet; |
10962 | } |
10963 | |
10964 | // Try to figure out the used register components |
10965 | for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end(); |
10966 | I != E; ++I) { |
10967 | |
10968 | // Don't look at users of the chain. |
10969 | if (I.getUse().getResNo() != 0) |
10970 | continue; |
10971 | |
10972 | // Abort if we can't understand the usage |
10973 | if (!I->isMachineOpcode() || |
10974 | I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) |
10975 | return Node; |
10976 | |
10977 | |
10978 | // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used. |
10979 | // Note that subregs are packed, i.e. Lane==0 is the first bit set |
10980 | // in OldDmask, so it can be optimized straight away. |
10981 | Lane = SubIdx2Lane(I->getConstantOperandVal(1)); |
10982 | if (Lane == ~0u) |
10983 | return Node; |
10984 | |
10985 | // Check if the use is for the TFE/LWE generated result at VGPRn+1. |
10986 | if (UsesTFC && Lane == TFCLane) { |
10987 | Users[Lane] = *I; |
10988 | } else { |
10989 | // Set which texture component corresponds to the lane. |
10990 | unsigned Comp; |
10991 | for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) { |
10992 | Comp = countTrailingZeros(Dmask); |
10993 | Dmask &= ~(1 << Comp); |
10994 | } |
10995 | |
10996 | // Abort if we have more than one user per component. |
10997 | if (Users[Lane]) |
10998 | return Node; |
10999 | |
11000 | Users[Lane] = *I; |
11001 | NewDmask |= 1 << Comp; |
11002 | } |
11003 | } |
11004 | |
11005 | // Don't allow 0 dmask, as hardware assumes one channel enabled. |
11006 | bool NoChannels = !NewDmask; |
11007 | if (NoChannels) { |
11008 | if (!UsesTFC) { |
11009 | // No uses of the result and not using TFC. Then do nothing. |
11010 | return Node; |
11011 | } |
11012 | // If the original dmask has one channel - then nothing to do |
11013 | if (OldBitsSet == 1) |
11014 | return Node; |
11015 | // Use an arbitrary dmask - required for the instruction to work |
11016 | NewDmask = 1; |
11017 | } |
11018 | |
11019 | if (NewDmask == OldDmask) |
11020 | return Node; |
11021 | |
11022 | unsigned BitsSet = countPopulation(NewDmask); |
11023 | |
11024 | // Check for TFE or LWE - increase the number of channels by one to account |
11025 | // for the extra return value |
11026 | // This will need adjustment for D16 if this is also included in |
11027 | // adjustWriteMask (this function) but at present D16 are excluded. |
11028 | unsigned NewChannels = BitsSet + UsesTFC; |
11029 | |
11030 | int NewOpcode = |
11031 | AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), NewChannels); |
11032 | assert(NewOpcode != -1 && |
11033 | NewOpcode != static_cast<int>(Node->getMachineOpcode()) && |
11034 | "failed to find equivalent MIMG op"); |
11035 | |
11036 | // Adjust the writemask in the node |
11037 | SmallVector<SDValue, 12> Ops; |
11038 | Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx); |
11039 | Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32)); |
11040 | Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end()); |
11041 | |
11042 | MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT(); |
11043 | |
11044 | MVT ResultVT = NewChannels == 1 ? |
11045 | SVT : MVT::getVectorVT(SVT, NewChannels == 3 ? 4 : |
11046 | NewChannels == 5 ? 8 : NewChannels); |
11047 | SDVTList NewVTList = HasChain ? |
11048 | DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT); |
11049 | |
11050 | |
11051 | MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node), |
11052 | NewVTList, Ops); |
11053 | |
11054 | if (HasChain) { |
11055 | // Update chain. |
11056 | DAG.setNodeMemRefs(NewNode, Node->memoperands()); |
11057 | DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1)); |
11058 | } |
11059 | |
11060 | if (NewChannels == 1) { |
11061 | assert(Node->hasNUsesOfValue(1, 0)); |
11062 | SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY, |
11063 | SDLoc(Node), Users[Lane]->getValueType(0), |
11064 | SDValue(NewNode, 0)); |
11065 | DAG.ReplaceAllUsesWith(Users[Lane], Copy); |
11066 | return nullptr; |
11067 | } |
11068 | |
11069 | // Update the users of the node with the new indices |
11070 | for (unsigned i = 0, Idx = AMDGPU::sub0; i < 5; ++i) { |
11071 | SDNode *User = Users[i]; |
11072 | if (!User) { |
11073 | // Handle the special case of NoChannels. We set NewDmask to 1 above, but |
11074 | // Users[0] is still nullptr because channel 0 doesn't really have a use. |
11075 | if (i || !NoChannels) |
11076 | continue; |
11077 | } else { |
11078 | SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32); |
11079 | DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op); |
11080 | } |
11081 | |
11082 | switch (Idx) { |
11083 | default: break; |
11084 | case AMDGPU::sub0: Idx = AMDGPU::sub1; break; |
11085 | case AMDGPU::sub1: Idx = AMDGPU::sub2; break; |
11086 | case AMDGPU::sub2: Idx = AMDGPU::sub3; break; |
11087 | case AMDGPU::sub3: Idx = AMDGPU::sub4; break; |
11088 | } |
11089 | } |
11090 | |
11091 | DAG.RemoveDeadNode(Node); |
11092 | return nullptr; |
11093 | } |
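// A standalone model of the lane-to-component walk used above (a sketch;
// laneToComponent is an illustrative name). Subregister lanes are packed, so
// lane i corresponds to the (i+1)-th set bit of the old dmask:
#include <cstdint>
static unsigned laneToComponent(uint32_t Dmask, unsigned Lane) {
  unsigned Comp = 0;
  for (unsigned i = 0; i <= Lane && Dmask != 0; ++i) {
    Comp = unsigned(__builtin_ctz(Dmask)); // countTrailingZeros
    Dmask &= ~(1u << Comp);                // consume this component
  }
  return Comp;
}
// e.g. Dmask = 0b1010: lane 0 reads component 1, lane 1 reads component 3.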
11094 | |
11095 | static bool isFrameIndexOp(SDValue Op) { |
11096 | if (Op.getOpcode() == ISD::AssertZext) |
11097 | Op = Op.getOperand(0); |
11098 | |
11099 | return isa<FrameIndexSDNode>(Op); |
11100 | } |
11101 | |
11102 | /// Legalize target independent instructions (e.g. INSERT_SUBREG) |
11103 | /// with frame index operands. |
11104 | /// LLVM assumes that inputs to these instructions are registers. |
11105 | SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node, |
11106 | SelectionDAG &DAG) const { |
11107 | if (Node->getOpcode() == ISD::CopyToReg) { |
11108 | RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1)); |
11109 | SDValue SrcVal = Node->getOperand(2); |
11110 | |
11111 | // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have |
11112 | // to try understanding copies to physical registers. |
11113 | if (SrcVal.getValueType() == MVT::i1 && DestReg->getReg().isPhysical()) { |
11114 | SDLoc SL(Node); |
11115 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
11116 | SDValue VReg = DAG.getRegister( |
11117 | MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1); |
11118 | |
11119 | SDNode *Glued = Node->getGluedNode(); |
11120 | SDValue ToVReg |
11121 | = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal, |
11122 | SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0)); |
11123 | SDValue ToResultReg |
11124 | = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0), |
11125 | VReg, ToVReg.getValue(1)); |
11126 | DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode()); |
11127 | DAG.RemoveDeadNode(Node); |
11128 | return ToResultReg.getNode(); |
11129 | } |
11130 | } |
11131 | |
11132 | SmallVector<SDValue, 8> Ops; |
11133 | for (unsigned i = 0; i < Node->getNumOperands(); ++i) { |
11134 | if (!isFrameIndexOp(Node->getOperand(i))) { |
11135 | Ops.push_back(Node->getOperand(i)); |
11136 | continue; |
11137 | } |
11138 | |
11139 | SDLoc DL(Node); |
11140 | Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, |
11141 | Node->getOperand(i).getValueType(), |
11142 | Node->getOperand(i)), 0)); |
11143 | } |
11144 | |
11145 | return DAG.UpdateNodeOperands(Node, Ops); |
11146 | } |
11147 | |
11148 | /// Fold the instructions after selecting them. |
11149 | /// Returns null if users were already updated. |
11150 | SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node, |
11151 | SelectionDAG &DAG) const { |
11152 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
11153 | unsigned Opcode = Node->getMachineOpcode(); |
11154 | |
11155 | if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() && |
11156 | !TII->isGather4(Opcode) && |
11157 | AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) != -1) { |
11158 | return adjustWritemask(Node, DAG); |
11159 | } |
11160 | |
11161 | if (Opcode == AMDGPU::INSERT_SUBREG || |
11162 | Opcode == AMDGPU::REG_SEQUENCE) { |
11163 | legalizeTargetIndependentNode(Node, DAG); |
11164 | return Node; |
11165 | } |
11166 | |
11167 | switch (Opcode) { |
11168 | case AMDGPU::V_DIV_SCALE_F32_e64: |
11169 | case AMDGPU::V_DIV_SCALE_F64_e64: { |
11170 | // Satisfy the operand register constraint when one of the inputs is |
11171 | // undefined. Ordinarily each undef value will have its own implicit_def of |
11172 | // a vreg, so force these to use a single register. |
11173 | SDValue Src0 = Node->getOperand(1); |
11174 | SDValue Src1 = Node->getOperand(3); |
11175 | SDValue Src2 = Node->getOperand(5); |
11176 | |
11177 | if ((Src0.isMachineOpcode() && |
11178 | Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) && |
11179 | (Src0 == Src1 || Src0 == Src2)) |
11180 | break; |
11181 | |
11182 | MVT VT = Src0.getValueType().getSimpleVT(); |
11183 | const TargetRegisterClass *RC = |
11184 | getRegClassFor(VT, Src0.getNode()->isDivergent()); |
11185 | |
11186 | MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); |
11187 | SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT); |
11188 | |
11189 | SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node), |
11190 | UndefReg, Src0, SDValue()); |
11191 | |
11192 | // src0 must be the same register as src1 or src2, even if the value is |
11193 | // undefined, so make sure we don't violate this constraint. |
11194 | if (Src0.isMachineOpcode() && |
11195 | Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) { |
11196 | if (Src1.isMachineOpcode() && |
11197 | Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
11198 | Src0 = Src1; |
11199 | else if (Src2.isMachineOpcode() && |
11200 | Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) |
11201 | Src0 = Src2; |
11202 | else { |
11203 | assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF); |
11204 | Src0 = UndefReg; |
11205 | Src1 = UndefReg; |
11206 | } |
11207 | } else |
11208 | break; |
11209 | |
11210 | SmallVector<SDValue, 9> Ops(Node->op_begin(), Node->op_end()); |
11211 | Ops[1] = Src0; |
11212 | Ops[3] = Src1; |
11213 | Ops[5] = Src2; |
11214 | Ops.push_back(ImpDef.getValue(1)); |
11215 | return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops); |
11216 | } |
11217 | default: |
11218 | break; |
11219 | } |
11220 | |
11221 | return Node; |
11222 | } |
11223 | |
11224 | // Any MIMG instructions that use tfe or lwe require an initialization of the |
11225 | // result register that will be written in the case of a memory access failure. |
11226 | // The required code is also added to tie this init code to the result of the |
11227 | // img instruction. |
11228 | void SITargetLowering::AddIMGInit(MachineInstr &MI) const { |
11229 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
11230 | const SIRegisterInfo &TRI = TII->getRegisterInfo(); |
11231 | MachineRegisterInfo &MRI = MI.getMF()->getRegInfo(); |
11232 | MachineBasicBlock &MBB = *MI.getParent(); |
11233 | |
11234 | MachineOperand *TFE = TII->getNamedOperand(MI, AMDGPU::OpName::tfe); |
11235 | MachineOperand *LWE = TII->getNamedOperand(MI, AMDGPU::OpName::lwe); |
11236 | MachineOperand *D16 = TII->getNamedOperand(MI, AMDGPU::OpName::d16); |
11237 | |
11238 | if (!TFE && !LWE) |
11239 | return; |
11240 | |
11241 | unsigned TFEVal = TFE ? TFE->getImm() : 0; |
11242 | unsigned LWEVal = LWE ? LWE->getImm() : 0; |
11243 | unsigned D16Val = D16 ? D16->getImm() : 0; |
11244 | |
11245 | if (!TFEVal && !LWEVal) |
11246 | return; |
11247 | |
11248 | |
11249 | // At least one of TFE or LWE are non-zero |
11250 | // We have to insert a suitable initialization of the result value and |
11251 | // tie this to the dest of the image instruction. |
11252 | const DebugLoc &DL = MI.getDebugLoc(); |
11253 | |
11254 | int DstIdx = |
11255 | AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); |
11256 | |
11257 | // Calculate which dword we have to initialize to 0. |
11258 | MachineOperand *MO_Dmask = TII->getNamedOperand(MI, AMDGPU::OpName::dmask); |
11259 | |
11260 | // Check that the dmask operand is found. |
11261 | assert(MO_Dmask && "Expected dmask operand in instruction"); |
11262 | |
11263 | unsigned dmask = MO_Dmask->getImm(); |
11264 | // Determine the number of active lanes taking into account the |
11265 | // Gather4 special case |
11266 | unsigned ActiveLanes = TII->isGather4(MI) ? 4 : countPopulation(dmask); |
11267 | |
11268 | bool Packed = !Subtarget->hasUnpackedD16VMem(); |
11269 | |
11270 | unsigned InitIdx = |
11271 | D16Val && Packed ? ((ActiveLanes + 1) >> 1) + 1 : ActiveLanes + 1; |
11272 | |
11273 | // Abandon attempt if the dst size isn't large enough |
11274 | // - this is in fact an error but this is picked up elsewhere and |
11275 | //   reported correctly. |
11276 | uint32_t DstSize = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32; |
11277 | if (DstSize < InitIdx) |
11278 | return; |
11279 | |
11280 | // Create a register for the initialization value. |
11281 | Register PrevDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); |
11282 | unsigned NewDst = 0; // Final initialized value will be in here |
11283 | |
11284 | // If PRTStrictNull feature is enabled (the default) then initialize |
11285 | // all the result registers to 0, otherwise just the error indication |
11286 | // register (VGPRn+1) |
11287 | unsigned SizeLeft = Subtarget->usePRTStrictNull() ? InitIdx : 1; |
11288 | unsigned CurrIdx = Subtarget->usePRTStrictNull() ? 0 : (InitIdx - 1); |
11289 | |
11290 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::IMPLICIT_DEF), PrevDst); |
11291 | for (; SizeLeft; SizeLeft--, CurrIdx++) { |
11292 | NewDst = MRI.createVirtualRegister(TII->getOpRegClass(MI, DstIdx)); |
11293 | |
11294 | Register SubReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
11295 | BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), SubReg) |
11296 | .addImm(0); |
11297 | |
11298 | BuildMI(MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewDst) |
11299 | .addReg(PrevDst) |
11300 | .addReg(SubReg) |
11301 | .addImm(SIRegisterInfo::getSubRegFromChannel(CurrIdx)); |
11302 | |
11303 | PrevDst = NewDst; |
11304 | } |
11305 | |
11306 | // Add as an implicit operand |
11307 | MI.addOperand(MachineOperand::CreateReg(NewDst, false, true)); |
11308 | |
11309 | // Tie the just added implicit operand to the dst |
11310 | MI.tieOperands(DstIdx, MI.getNumOperands() - 1); |
11311 | } |
11312 | |
11313 | /// Assign the register class depending on the number of |
11314 | /// bits set in the writemask |
11315 | void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, |
11316 | SDNode *Node) const { |
11317 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
11318 | |
11319 | MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); |
11320 | |
11321 | if (TII->isVOP3(MI.getOpcode())) { |
11322 | // Make sure constant bus requirements are respected. |
11323 | TII->legalizeOperandsVOP3(MRI, MI); |
11324 | |
11325 | // Prefer VGPRs over AGPRs in mAI instructions where possible. |
11326 | // This saves a chain-copy of registers and better balances register |
11327 | // use between vgpr and agpr as agpr tuples tend to be big. |
11328 | if (const MCOperandInfo *OpInfo = MI.getDesc().OpInfo) { |
11329 | unsigned Opc = MI.getOpcode(); |
11330 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
11331 | for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), |
11332 | AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) }) { |
11333 | if (I == -1) |
11334 | break; |
11335 | MachineOperand &Op = MI.getOperand(I); |
11336 | if ((OpInfo[I].RegClass != llvm::AMDGPU::AV_64RegClassID && |
11337 | OpInfo[I].RegClass != llvm::AMDGPU::AV_32RegClassID) || |
11338 | !Op.getReg().isVirtual() || !TRI->isAGPR(MRI, Op.getReg())) |
11339 | continue; |
11340 | auto *Src = MRI.getUniqueVRegDef(Op.getReg()); |
11341 | if (!Src || !Src->isCopy() || |
11342 | !TRI->isSGPRReg(MRI, Src->getOperand(1).getReg())) |
11343 | continue; |
11344 | auto *RC = TRI->getRegClassForReg(MRI, Op.getReg()); |
11345 | auto *NewRC = TRI->getEquivalentVGPRClass(RC); |
11346 | // All uses of agpr64 and agpr32 can also accept vgpr except for |
11347 | // v_accvgpr_read, but we do not produce agpr reads during selection, |
11348 | // so no use checks are needed. |
11349 | MRI.setRegClass(Op.getReg(), NewRC); |
11350 | } |
11351 | } |
11352 | |
11353 | return; |
11354 | } |
11355 | |
11356 | // Replace unused atomics with the no return version. |
11357 | int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); |
11358 | if (NoRetAtomicOp != -1) { |
11359 | if (!Node->hasAnyUseOfValue(0)) { |
11360 | int CPolIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), |
11361 | AMDGPU::OpName::cpol); |
11362 | if (CPolIdx != -1) { |
11363 | MachineOperand &CPol = MI.getOperand(CPolIdx); |
11364 | CPol.setImm(CPol.getImm() & ~AMDGPU::CPol::GLC); |
11365 | } |
11366 | MI.RemoveOperand(0); |
11367 | MI.setDesc(TII->get(NoRetAtomicOp)); |
11368 | return; |
11369 | } |
11370 | |
11371 | // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg |
11372 | // instruction, because the return type of these instructions is a vec2 of |
11373 | // the memory type, so it can be tied to the input operand. |
11374 | // This means these instructions always have a use, so we need to add a |
11375 | // special case to check if the atomic has only one extract_subreg use, |
11376 | // which itself has no uses. |
11377 | if ((Node->hasNUsesOfValue(1, 0) && |
11378 | Node->use_begin()->isMachineOpcode() && |
11379 | Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG && |
11380 | !Node->use_begin()->hasAnyUseOfValue(0))) { |
11381 | Register Def = MI.getOperand(0).getReg(); |
11382 | |
11383 | // Change this into a noret atomic. |
11384 | MI.setDesc(TII->get(NoRetAtomicOp)); |
11385 | MI.RemoveOperand(0); |
11386 | |
11387 | // If we only remove the def operand from the atomic instruction, the |
11388 | // extract_subreg will be left with a use of a vreg without a def. |
11389 | // So we need to insert an implicit_def to avoid machine verifier |
11390 | // errors. |
11391 | BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), |
11392 | TII->get(AMDGPU::IMPLICIT_DEF), Def); |
11393 | } |
11394 | return; |
11395 | } |
11396 | |
11397 | if (TII->isMIMG(MI) && !MI.mayStore()) |
11398 | AddIMGInit(MI); |
11399 | } |
11400 | |
11401 | static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL, |
11402 | uint64_t Val) { |
11403 | SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32); |
11404 | return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0); |
11405 | } |
11406 | |
11407 | MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG, |
11408 | const SDLoc &DL, |
11409 | SDValue Ptr) const { |
11410 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
11411 | |
11412 | // Build the half of the subregister with the constants before building the |
11413 | // full 128-bit register. If we are building multiple resource descriptors, |
11414 | // this will save us from building them again. |
11415 | const SDValue Ops0[] = { |
11416 | DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32), |
11417 | buildSMovImm32(DAG, DL, 0), |
11418 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
11419 | buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32), |
11420 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32) |
11421 | }; |
11422 | |
11423 | SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, |
11424 | MVT::v2i32, Ops0), 0); |
11425 | |
11426 | // Combine the constants and the pointer. |
11427 | const SDValue Ops1[] = { |
11428 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
11429 | Ptr, |
11430 | DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32), |
11431 | SubRegHi, |
11432 | DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32) |
11433 | }; |
11434 | |
11435 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1); |
11436 | } |
11437 | |
11438 | /// Return a resource descriptor with the 'Add TID' bit enabled |
11439 | /// The TID (Thread ID) is multiplied by the stride value (bits [61:48] |
11440 | /// of the resource descriptor) to create an offset, which is added to |
11441 | /// the resource pointer. |
11442 | MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL, |
11443 | SDValue Ptr, uint32_t RsrcDword1, |
11444 | uint64_t RsrcDword2And3) const { |
11445 | SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr); |
11446 | SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr); |
11447 | if (RsrcDword1) { |
11448 | PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi, |
11449 | DAG.getConstant(RsrcDword1, DL, MVT::i32)), |
11450 | 0); |
11451 | } |
11452 | |
11453 | SDValue DataLo = buildSMovImm32(DAG, DL, |
11454 | RsrcDword2And3 & UINT64_C(0xFFFFFFFF)); |
11455 | SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32); |
11456 | |
11457 | const SDValue Ops[] = { |
11458 | DAG.getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32), |
11459 | PtrLo, |
11460 | DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32), |
11461 | PtrHi, |
11462 | DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32), |
11463 | DataLo, |
11464 | DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32), |
11465 | DataHi, |
11466 | DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32) |
11467 | }; |
11468 | |
11469 | return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops); |
11470 | } |
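// The 128-bit SGPR_128 descriptor assembled above, viewed as four dwords
// (a layout sketch only; RsrcWords is an illustrative name):
#include <cstdint>
struct RsrcWords {
  uint32_t PtrLo;  // sub0: low half of Ptr
  uint32_t PtrHi;  // sub1: high half of Ptr, OR'd with RsrcDword1 if non-zero
  uint32_t DataLo; // sub2: RsrcDword2And3 & 0xFFFFFFFF
  uint32_t DataHi; // sub3: RsrcDword2And3 >> 32
};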
11471 | |
11472 | |
11473 | //===----------------------------------------------------------------------===// |
11474 | //                         SI Inline Assembly Support |
11475 | //===----------------------------------------------------------------------===// |
11476 | std::pair<unsigned, const TargetRegisterClass *> |
11477 | SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_, |
11478 | StringRef Constraint, |
11479 | MVT VT) const { |
11480 | const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_); |
11481 | |
11482 | const TargetRegisterClass *RC = nullptr; |
11483 | if (Constraint.size() == 1) { |
11484 | const unsigned BitWidth = VT.getSizeInBits(); |
11485 | switch (Constraint[0]) { |
11486 | default: |
11487 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
11488 | case 's': |
11489 | case 'r': |
11490 | switch (BitWidth) { |
11491 | case 16: |
11492 | RC = &AMDGPU::SReg_32RegClass; |
11493 | break; |
11494 | case 64: |
11495 | RC = &AMDGPU::SGPR_64RegClass; |
11496 | break; |
11497 | default: |
11498 | RC = SIRegisterInfo::getSGPRClassForBitWidth(BitWidth); |
11499 | if (!RC) |
11500 | return std::make_pair(0U, nullptr); |
11501 | break; |
11502 | } |
11503 | break; |
11504 | case 'v': |
11505 | switch (BitWidth) { |
11506 | case 16: |
11507 | RC = &AMDGPU::VGPR_32RegClass; |
11508 | break; |
11509 | default: |
11510 | RC = TRI->getVGPRClassForBitWidth(BitWidth); |
11511 | if (!RC) |
11512 | return std::make_pair(0U, nullptr); |
11513 | break; |
11514 | } |
11515 | break; |
11516 | case 'a': |
11517 | if (!Subtarget->hasMAIInsts()) |
11518 | break; |
11519 | switch (BitWidth) { |
11520 | case 16: |
11521 | RC = &AMDGPU::AGPR_32RegClass; |
11522 | break; |
11523 | default: |
11524 | RC = TRI->getAGPRClassForBitWidth(BitWidth); |
11525 | if (!RC) |
11526 | return std::make_pair(0U, nullptr); |
11527 | break; |
11528 | } |
11529 | break; |
11530 | } |
11531 | // We actually support i128, i16 and f16 as inline parameters |
11532 | // even if they are not reported as legal |
11533 | if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 || |
11534 | VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16)) |
11535 | return std::make_pair(0U, RC); |
11536 | } |
11537 | |
11538 | if (Constraint.size() > 1) { |
11539 | if (Constraint[1] == 'v') { |
11540 | RC = &AMDGPU::VGPR_32RegClass; |
11541 | } else if (Constraint[1] == 's') { |
11542 | RC = &AMDGPU::SGPR_32RegClass; |
11543 | } else if (Constraint[1] == 'a') { |
11544 | RC = &AMDGPU::AGPR_32RegClass; |
11545 | } |
11546 | |
11547 | if (RC) { |
11548 | uint32_t Idx; |
11549 | bool Failed = Constraint.substr(2).getAsInteger(10, Idx); |
11550 | if (!Failed && Idx < RC->getNumRegs()) |
11551 | return std::make_pair(RC->getRegister(Idx), RC); |
11552 | } |
11553 | } |
11554 | |
11555 | |
11556 | return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); |
11557 | } |
11558 | |
11559 | static bool isImmConstraint(StringRef Constraint) { |
11560 | if (Constraint.size() == 1) { |
11561 | switch (Constraint[0]) { |
11562 | default: break; |
11563 | case 'I': |
11564 | case 'J': |
11565 | case 'A': |
11566 | case 'B': |
11567 | case 'C': |
11568 | return true; |
11569 | } |
11570 | } else if (Constraint == "DA" || |
11571 | Constraint == "DB") { |
11572 | return true; |
11573 | } |
11574 | return false; |
11575 | } |
11576 | |
11577 | SITargetLowering::ConstraintType |
11578 | SITargetLowering::getConstraintType(StringRef Constraint) const { |
11579 | if (Constraint.size() == 1) { |
11580 | switch (Constraint[0]) { |
11581 | default: break; |
11582 | case 's': |
11583 | case 'v': |
11584 | case 'a': |
11585 | return C_RegisterClass; |
11586 | } |
11587 | } |
11588 | if (isImmConstraint(Constraint)) { |
11589 | return C_Other; |
11590 | } |
11591 | return TargetLowering::getConstraintType(Constraint); |
11592 | } |
11593 | |
11594 | static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { |
11595 | if (!AMDGPU::isInlinableIntLiteral(Val)) { |
11596 | Val = Val & maskTrailingOnes<uint64_t>(Size); |
11597 | } |
11598 | return Val; |
11599 | } |
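// A standalone model of clearUnusedBits (a sketch; the [-16, 64] range is an
// assumption about isInlinableIntLiteral based on its use here). Inlinable
// literals stay sign-extended; everything else is truncated to Size bits:
#include <cstdint>
static uint64_t clearUnusedBitsModel(uint64_t Val, unsigned Size) {
  bool Inlinable = int64_t(Val) >= -16 && int64_t(Val) <= 64;
  if (!Inlinable && Size < 64)
    Val &= (uint64_t(1) << Size) - 1; // maskTrailingOnes<uint64_t>(Size)
  return Val;
}
// e.g. clearUnusedBitsModel(0x1234567890, 32) == 0x34567890, while -1 (an
// inlinable literal) is returned unchanged.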
11600 | |
11601 | void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op, |
11602 | std::string &Constraint, |
11603 | std::vector<SDValue> &Ops, |
11604 | SelectionDAG &DAG) const { |
11605 | if (isImmConstraint(Constraint)) { |
11606 | uint64_t Val; |
11607 | if (getAsmOperandConstVal(Op, Val) && |
11608 | checkAsmConstraintVal(Op, Constraint, Val)) { |
11609 | Val = clearUnusedBits(Val, Op.getScalarValueSizeInBits()); |
11610 | Ops.push_back(DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64)); |
11611 | } |
11612 | } else { |
11613 | TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); |
11614 | } |
11615 | } |
11616 | |
11617 | bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { |
11618 | unsigned Size = Op.getScalarValueSizeInBits(); |
11619 | if (Size > 64) |
11620 | return false; |
11621 | |
11622 | if (Size == 16 && !Subtarget->has16BitInsts()) |
11623 | return false; |
11624 | |
11625 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { |
11626 | Val = C->getSExtValue(); |
11627 | return true; |
11628 | } |
11629 | if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { |
11630 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); |
11631 | return true; |
11632 | } |
11633 | if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) { |
11634 | if (Size != 16 || Op.getNumOperands() != 2) |
11635 | return false; |
11636 | if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef()) |
11637 | return false; |
11638 | if (ConstantSDNode *C = V->getConstantSplatNode()) { |
11639 | Val = C->getSExtValue(); |
11640 | return true; |
11641 | } |
11642 | if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) { |
11643 | Val = C->getValueAPF().bitcastToAPInt().getSExtValue(); |
11644 | return true; |
11645 | } |
11646 | } |
11647 | |
11648 | return false; |
11649 | } |
11650 | |
11651 | bool SITargetLowering::checkAsmConstraintVal(SDValue Op, |
11652 | const std::string &Constraint, |
11653 | uint64_t Val) const { |
11654 | if (Constraint.size() == 1) { |
11655 | switch (Constraint[0]) { |
11656 | case 'I': |
11657 | return AMDGPU::isInlinableIntLiteral(Val); |
11658 | case 'J': |
11659 | return isInt<16>(Val); |
11660 | case 'A': |
11661 | return checkAsmConstraintValA(Op, Val); |
11662 | case 'B': |
11663 | return isInt<32>(Val); |
11664 | case 'C': |
11665 | return isUInt<32>(clearUnusedBits(Val, Op.getScalarValueSizeInBits())) || |
11666 | AMDGPU::isInlinableIntLiteral(Val); |
11667 | default: |
11668 | break; |
11669 | } |
11670 | } else if (Constraint.size() == 2) { |
11671 | if (Constraint == "DA") { |
11672 | int64_t HiBits = static_cast<int32_t>(Val >> 32); |
11673 | int64_t LoBits = static_cast<int32_t>(Val); |
11674 | return checkAsmConstraintValA(Op, HiBits, 32) && |
11675 | checkAsmConstraintValA(Op, LoBits, 32); |
11676 | } |
11677 | if (Constraint == "DB") { |
11678 | return true; |
11679 | } |
11680 | } |
11681 | llvm_unreachable("Invalid asm constraint"); |
11682 | } |
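// Worked example for the "DA" path above (a sketch with illustrative values):
// each sign-extended 32-bit half of the 64-bit immediate must pass the
// inline-literal check on its own.
#include <cassert>
#include <cstdint>
static void checkDAExample() {
  uint64_t Val = 0x3F80000000000000; // hi half = 1.0f bit pattern, lo half = 0
  int64_t HiBits = static_cast<int32_t>(Val >> 32);
  int64_t LoBits = static_cast<int32_t>(Val);
  assert(HiBits == 0x3F800000 && LoBits == 0); // both halves are inlinable
}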
11683 | |
11684 | bool SITargetLowering::checkAsmConstraintValA(SDValue Op, |
11685 | uint64_t Val, |
11686 | unsigned MaxSize) const { |
11687 | unsigned Size = std::min<unsigned>(Op.getScalarValueSizeInBits(), MaxSize); |
11688 | bool HasInv2Pi = Subtarget->hasInv2PiInlineImm(); |
11689 | if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) || |
11690 | (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) || |
11691 | (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) { |
11692 | return true; |
11693 | } |
11694 | return false; |
11695 | } |
11696 | |
11697 | static int getAlignedAGPRClassID(unsigned UnalignedClassID) { |
11698 | switch (UnalignedClassID) { |
11699 | case AMDGPU::VReg_64RegClassID: |
11700 | return AMDGPU::VReg_64_Align2RegClassID; |
11701 | case AMDGPU::VReg_96RegClassID: |
11702 | return AMDGPU::VReg_96_Align2RegClassID; |
11703 | case AMDGPU::VReg_128RegClassID: |
11704 | return AMDGPU::VReg_128_Align2RegClassID; |
11705 | case AMDGPU::VReg_160RegClassID: |
11706 | return AMDGPU::VReg_160_Align2RegClassID; |
11707 | case AMDGPU::VReg_192RegClassID: |
11708 | return AMDGPU::VReg_192_Align2RegClassID; |
11709 | case AMDGPU::VReg_224RegClassID: |
11710 | return AMDGPU::VReg_224_Align2RegClassID; |
11711 | case AMDGPU::VReg_256RegClassID: |
11712 | return AMDGPU::VReg_256_Align2RegClassID; |
11713 | case AMDGPU::VReg_512RegClassID: |
11714 | return AMDGPU::VReg_512_Align2RegClassID; |
11715 | case AMDGPU::VReg_1024RegClassID: |
11716 | return AMDGPU::VReg_1024_Align2RegClassID; |
11717 | case AMDGPU::AReg_64RegClassID: |
11718 | return AMDGPU::AReg_64_Align2RegClassID; |
11719 | case AMDGPU::AReg_96RegClassID: |
11720 | return AMDGPU::AReg_96_Align2RegClassID; |
11721 | case AMDGPU::AReg_128RegClassID: |
11722 | return AMDGPU::AReg_128_Align2RegClassID; |
11723 | case AMDGPU::AReg_160RegClassID: |
11724 | return AMDGPU::AReg_160_Align2RegClassID; |
11725 | case AMDGPU::AReg_192RegClassID: |
11726 | return AMDGPU::AReg_192_Align2RegClassID; |
11727 | case AMDGPU::AReg_256RegClassID: |
11728 | return AMDGPU::AReg_256_Align2RegClassID; |
11729 | case AMDGPU::AReg_512RegClassID: |
11730 | return AMDGPU::AReg_512_Align2RegClassID; |
11731 | case AMDGPU::AReg_1024RegClassID: |
11732 | return AMDGPU::AReg_1024_Align2RegClassID; |
11733 | default: |
11734 | return -1; |
11735 | } |
11736 | } |
11737 | |
11738 | |
11739 | |
11740 | |
11741 | void SITargetLowering::finalizeLowering(MachineFunction &MF) const { |
11742 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
11743 | SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
11744 | const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); |
11745 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
11746 | const SIInstrInfo *TII = ST.getInstrInfo(); |
11747 | |
11748 | if (Info->isEntryFunction()) { |
11749 | // Callable functions have fixed registers used for stack access. |
11750 | reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info); |
11751 | } |
11752 | |
11753 | assert(!TRI->isSubRegister(Info->getScratchRSrcReg(), |
11754 | Info->getStackPtrOffsetReg())); |
11755 | if (Info->getStackPtrOffsetReg() != AMDGPU::SP_REG) |
11756 | MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg()); |
11757 | |
11758 | // We need to worry about replacing the default register with itself in case |
11759 | // of MIR testcases missing the MFI. |
11760 | if (Info->getScratchRSrcReg() != AMDGPU::PRIVATE_RSRC_REG) |
11761 | MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg()); |
11762 | |
11763 | if (Info->getFrameOffsetReg() != AMDGPU::FP_REG) |
11764 | MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg()); |
11765 | |
11766 | Info->limitOccupancy(MF); |
11767 | |
11768 | if (ST.isWave32() && !MF.empty()) { |
11769 | for (auto &MBB : MF) { |
11770 | for (auto &MI : MBB) { |
11771 | TII->fixImplicitOperands(MI); |
11772 | } |
11773 | } |
11774 | } |
11775 | |
11776 | // FIXME: This is a hack to fixup AGPR classes to use the properly aligned |
11777 | // classes if required. Ideally the register class constraints would differ |
11778 | // per-subtarget, but there's no easy way to achieve that right now. This is |
11779 | // not a problem for VGPRs because the correctly aligned VGPR class is implied |
11780 | // from using them. |
11781 | if (ST.needsAlignedVGPRs()) { |
11782 | for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) { |
11783 | const Register Reg = Register::index2VirtReg(I); |
11784 | const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg); |
11785 | if (!RC) |
11786 | continue; |
11787 | int NewClassID = getAlignedAGPRClassID(RC->getID()); |
11788 | if (NewClassID != -1) |
11789 | MRI.setRegClass(Reg, TRI->getRegClass(NewClassID)); |
11790 | } |
11791 | } |
11792 | |
11793 | TargetLoweringBase::finalizeLowering(MF); |
11794 | |
11795 | // Allocate a VGPR for future SGPR Spill if |
11796 | // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used |
11797 | // FIXME: We won't need this hack if we split SGPR allocation from VGPR |
11798 | if (VGPRReserveforSGPRSpill && TRI->spillSGPRToVGPR() && |
11799 | !Info->VGPRReservedForSGPRSpill && !Info->isEntryFunction()) |
11800 | Info->reserveVGPRforSGPRSpills(MF); |
11801 | } |
11802 | |
11803 | void SITargetLowering::computeKnownBitsForFrameIndex( |
11804 | const int FI, KnownBits &Known, const MachineFunction &MF) const { |
11805 | TargetLowering::computeKnownBitsForFrameIndex(FI, Known, MF); |
11806 | |
11807 | // Set the high bits to zero based on the maximum allowed scratch size per |
11808 | // wave. We can't use vaddr in MUBUF instructions if we don't know the address |
11809 | // calculation won't overflow, so assume the sign bit is never set. |
11810 | Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); |
11811 | } |
11812 | |
11813 | static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB, |
11814 | KnownBits &Known, unsigned Dim) { |
11815 | unsigned MaxValue = |
11816 | ST.getMaxWorkitemID(KB.getMachineFunction().getFunction(), Dim); |
11817 | Known.Zero.setHighBits(countLeadingZeros(MaxValue)); |
11818 | } |
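// A standalone model of the known-bits computation above (a sketch;
// knownHighZeroBits is an illustrative name, and __builtin_clz assumes a
// GCC/Clang host). Bits above the highest bit of the maximum workitem ID can
// never be set:
#include <cstdint>
static unsigned knownHighZeroBits(uint32_t MaxValue) {
  // LLVM's countLeadingZeros(0) is 32; guard the UB of __builtin_clz(0).
  return MaxValue == 0 ? 32u : unsigned(__builtin_clz(MaxValue));
}
// e.g. a 1024-wide dimension has MaxValue 1023 (0x3FF), so the top 22 bits
// of the 32-bit workitem ID are known zero.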
11819 | |
11820 | void SITargetLowering::computeKnownBitsForTargetInstr( |
11821 | GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts, |
11822 | const MachineRegisterInfo &MRI, unsigned Depth) const { |
11823 | const MachineInstr *MI = MRI.getVRegDef(R); |
11824 | switch (MI->getOpcode()) { |
11825 | case AMDGPU::G_INTRINSIC: { |
11826 | switch (MI->getIntrinsicID()) { |
11827 | case Intrinsic::amdgcn_workitem_id_x: |
11828 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 0); |
11829 | break; |
11830 | case Intrinsic::amdgcn_workitem_id_y: |
11831 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 1); |
11832 | break; |
11833 | case Intrinsic::amdgcn_workitem_id_z: |
11834 | knownBitsForWorkitemID(*getSubtarget(), KB, Known, 2); |
11835 | break; |
11836 | case Intrinsic::amdgcn_mbcnt_lo: |
11837 | case Intrinsic::amdgcn_mbcnt_hi: { |
11838 | // These return at most the wavefront size - 1. |
11839 | unsigned Size = MRI.getType(R).getSizeInBits(); |
11840 | Known.Zero.setHighBits(Size - getSubtarget()->getWavefrontSizeLog2()); |
11841 | break; |
11842 | } |
11843 | case Intrinsic::amdgcn_groupstaticsize: { |
11844 | // We can report everything over the maximum size as 0. We can't report |
11845 | // based on the actual size because we don't know if it's accurate or not |
11846 | // at any given point. |
11847 | Known.Zero.setHighBits(countLeadingZeros(getSubtarget()->getLocalMemorySize())); |
11848 | break; |
11849 | } |
11850 | } |
11851 | break; |
11852 | } |
11853 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE: |
11854 | Known.Zero.setHighBits(24); |
11855 | break; |
11856 | case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT: |
11857 | Known.Zero.setHighBits(16); |
11858 | break; |
11859 | } |
11860 | } |
11861 | |
11862 | Align SITargetLowering::computeKnownAlignForTargetInstr( |
11863 | GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI, |
11864 | unsigned Depth) const { |
11865 | const MachineInstr *MI = MRI.getVRegDef(R); |
11866 | switch (MI->getOpcode()) { |
11867 | case AMDGPU::G_INTRINSIC: |
11868 | case AMDGPU::G_INTRINSIC_W_SIDE_EFFECTS: { |
11869 | // FIXME: Can this move to generic code? What about the case where the call |
11870 | // site specifies a lower alignment? |
11871 | Intrinsic::ID IID = MI->getIntrinsicID(); |
11872 | LLVMContext &Ctx = KB.getMachineFunction().getFunction().getContext(); |
11873 | AttributeList Attrs = Intrinsic::getAttributes(Ctx, IID); |
11874 | if (MaybeAlign RetAlign = Attrs.getRetAlignment()) |
11875 | return *RetAlign; |
11876 | return Align(1); |
11877 | } |
11878 | default: |
11879 | return Align(1); |
11880 | } |
11881 | } |
11882 | |
11883 | Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { |
11884 | const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); |
11885 | const Align CacheLineAlign = Align(64); |
11886 | |
11887 | // Pre-GFX10 targets did not benefit from loop alignment |
11888 | if (!ML || DisableLoopAlignment || |
11889 | (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || |
11890 | getSubtarget()->hasInstFwdPrefetchBug()) |
11891 | return PrefAlign; |
11892 | |
11893 | // GFX10 I$ is 4 x 64 bytes cache lines. |
11894 | // By default prefetcher keeps one cache line behind and reads two ahead. |
11895 | // We can modify it with S_INST_PREFETCH for larger loops to have two lines |
11896 | // behind and one ahead. |
11897 | // Therefore we can benefit from aligning loop headers if loop fits 192 bytes. |
11898 | // If loop fits 64 bytes it always spans no more than two cache lines and |
11899 | // does not need an alignment. |
11900 | // Else if loop is less or equal 128 bytes we do not need to modify prefetch. |
11901 | // Else if loop is less or equal 192 bytes we need two lines behind. |
11902 | |
11903 | const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); |
11904 | const MachineBasicBlock *Header = ML->getHeader(); |
11905 | if (Header->getAlignment() != PrefAlign) |
11906 | return Header->getAlignment(); |
11907 | |
11908 | unsigned LoopSize = 0; |
11909 | for (const MachineBasicBlock *MBB : ML->blocks()) { |
11910 | // If inner loop block is aligned assume in average half of the alignment |
11911 | // size to be added as nops. |
11912 | if (MBB != Header) |
11913 | LoopSize += MBB->getAlignment().value() / 2; |
11914 | |
11915 | for (const MachineInstr &MI : *MBB) { |
11916 | LoopSize += TII->getInstSizeInBytes(MI); |
11917 | if (LoopSize > 192) |
11918 | return PrefAlign; |
11919 | } |
11920 | } |
11921 | |
11922 | if (LoopSize <= 64) |
11923 | return PrefAlign; |
11924 | |
11925 | if (LoopSize <= 128) |
11926 | return CacheLineAlign; |
11927 | |
11928 | // If any of parent loops is surrounded by prefetch instructions do not |
11929 | // insert new for inner loop, which would reset parent's settings. |
11930 | for (MachineLoop *P = ML->getParentLoop(); P; P = P->getParentLoop()) { |
11931 | if (MachineBasicBlock *Exit = P->getExitBlock()) { |
11932 | auto I = Exit->getFirstNonDebugInstr(); |
11933 | if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) |
11934 | return CacheLineAlign; |
11935 | } |
11936 | } |
11937 | |
11938 | MachineBasicBlock *Pre = ML->getLoopPreheader(); |
11939 | MachineBasicBlock *Exit = ML->getExitBlock(); |
11940 | |
11941 | if (Pre && Exit) { |
11942 | BuildMI(*Pre, Pre->getFirstTerminator(), DebugLoc(), |
11943 | TII->get(AMDGPU::S_INST_PREFETCH)) |
11944 | .addImm(1); |
11945 | |
11946 | BuildMI(*Exit, Exit->getFirstNonDebugInstr(), DebugLoc(), |
11947 | TII->get(AMDGPU::S_INST_PREFETCH)) |
11948 | .addImm(2); |
11949 | } |
11950 | |
11951 | return CacheLineAlign; |
11952 | } |
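// The size thresholds above as a decision function (a sketch; the enum and
// function names are illustrative). Cache lines are 64 bytes on GFX10:
enum class LoopAlignPolicy { Default, CacheLine, CacheLineWithPrefetch };
static LoopAlignPolicy pickLoopAlignPolicy(unsigned LoopSizeBytes) {
  if (LoopSizeBytes <= 64 || LoopSizeBytes > 192)
    return LoopAlignPolicy::Default;             // fits anyway, or too big
  if (LoopSizeBytes <= 128)
    return LoopAlignPolicy::CacheLine;           // fits two cache lines
  return LoopAlignPolicy::CacheLineWithPrefetch; // bracket with S_INST_PREFETCH
}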
11953 | |
11954 | LLVM_ATTRIBUTE_UNUSED |
11955 | static bool isCopyFromRegOfInlineAsm(const SDNode *N) { |
11956 | assert(N->getOpcode() == ISD::CopyFromReg); |
11957 | do { |
11958 | // Follow the chain until we find an INLINEASM node. |
11959 | N = N->getOperand(0).getNode(); |
11960 | if (N->getOpcode() == ISD::INLINEASM || |
11961 | N->getOpcode() == ISD::INLINEASM_BR) |
11962 | return true; |
11963 | } while (N->getOpcode() == ISD::CopyFromReg); |
11964 | return false; |
11965 | } |
11966 | |
11967 | bool SITargetLowering::isSDNodeSourceOfDivergence( |
11968 | const SDNode *N, FunctionLoweringInfo *FLI, |
11969 | LegacyDivergenceAnalysis *KDA) const { |
11970 | switch (N->getOpcode()) { |
11971 | case ISD::CopyFromReg: { |
11972 | const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1)); |
11973 | const MachineRegisterInfo &MRI = FLI->MF->getRegInfo(); |
11974 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
11975 | Register Reg = R->getReg(); |
11976 | |
11977 | // FIXME: Why does this need to consider isLiveIn? |
11978 | if (Reg.isPhysical() || MRI.isLiveIn(Reg)) |
11979 | return !TRI->isSGPRReg(MRI, Reg); |
11980 | |
11981 | if (const Value *V = FLI->getValueFromVirtualReg(R->getReg())) |
11982 | return KDA->isDivergent(V); |
11983 | |
11984 | assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N)); |
11985 | return !TRI->isSGPRReg(MRI, Reg); |
11986 | } |
11987 | case ISD::LOAD: { |
11988 | const LoadSDNode *L = cast<LoadSDNode>(N); |
11989 | unsigned AS = L->getAddressSpace(); |
11990 | // A flat load may access private memory. |
11991 | return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS; |
11992 | } |
11993 | case ISD::CALLSEQ_END: |
11994 | return true; |
11995 | case ISD::INTRINSIC_WO_CHAIN: |
11996 | return AMDGPU::isIntrinsicSourceOfDivergence( |
11997 | cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()); |
11998 | case ISD::INTRINSIC_W_CHAIN: |
11999 | return AMDGPU::isIntrinsicSourceOfDivergence( |
12000 | cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()); |
12001 | case AMDGPUISD::ATOMIC_CMP_SWAP: |
12002 | case AMDGPUISD::ATOMIC_INC: |
12003 | case AMDGPUISD::ATOMIC_DEC: |
12004 | case AMDGPUISD::ATOMIC_LOAD_FMIN: |
12005 | case AMDGPUISD::ATOMIC_LOAD_FMAX: |
12006 | case AMDGPUISD::BUFFER_ATOMIC_SWAP: |
12007 | case AMDGPUISD::BUFFER_ATOMIC_ADD: |
12008 | case AMDGPUISD::BUFFER_ATOMIC_SUB: |
12009 | case AMDGPUISD::BUFFER_ATOMIC_SMIN: |
12010 | case AMDGPUISD::BUFFER_ATOMIC_UMIN: |
12011 | case AMDGPUISD::BUFFER_ATOMIC_SMAX: |
12012 | case AMDGPUISD::BUFFER_ATOMIC_UMAX: |
12013 | case AMDGPUISD::BUFFER_ATOMIC_AND: |
12014 | case AMDGPUISD::BUFFER_ATOMIC_OR: |
12015 | case AMDGPUISD::BUFFER_ATOMIC_XOR: |
12016 | case AMDGPUISD::BUFFER_ATOMIC_INC: |
12017 | case AMDGPUISD::BUFFER_ATOMIC_DEC: |
12018 | case AMDGPUISD::BUFFER_ATOMIC_CMPSWAP: |
12019 | case AMDGPUISD::BUFFER_ATOMIC_CSUB: |
12020 | case AMDGPUISD::BUFFER_ATOMIC_FADD: |
12021 | case AMDGPUISD::BUFFER_ATOMIC_FMIN: |
12022 | case AMDGPUISD::BUFFER_ATOMIC_FMAX: |
12023 | // Target-specific read-modify-write atomics are sources of divergence. |
12024 | return true; |
12025 | default: |
12026 | if (auto *A = dyn_cast<AtomicSDNode>(N)) { |
12027 | // Generic read-modify-write atomics are sources of divergence. |
12028 | return A->readMem() && A->writeMem(); |
12029 | } |
12030 | return false; |
12031 | } |
12032 | } |
12033 | |
12034 | bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG, |
12035 | EVT VT) const { |
12036 | switch (VT.getScalarType().getSimpleVT().SimpleTy) { |
12037 | case MVT::f32: |
12038 | return hasFP32Denormals(DAG.getMachineFunction()); |
12039 | case MVT::f64: |
12040 | case MVT::f16: |
12041 | return hasFP64FP16Denormals(DAG.getMachineFunction()); |
12042 | default: |
12043 | return false; |
12044 | } |
12045 | } |
12046 | |
12047 | bool SITargetLowering::denormalsEnabledForType(LLT Ty, |
12048 | MachineFunction &MF) const { |
12049 | switch (Ty.getScalarSizeInBits()) { |
12050 | case 32: |
12051 | return hasFP32Denormals(MF); |
12052 | case 64: |
12053 | case 16: |
12054 | return hasFP64FP16Denormals(MF); |
12055 | default: |
12056 | return false; |
12057 | } |
12058 | } |
12059 | |
12060 | bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, |
12061 | const SelectionDAG &DAG, |
12062 | bool SNaN, |
12063 | unsigned Depth) const { |
12064 | if (Op.getOpcode() == AMDGPUISD::CLAMP) { |
12065 | const MachineFunction &MF = DAG.getMachineFunction(); |
12066 | const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); |
12067 | |
12068 | if (Info->getMode().DX10Clamp) |
12069 | return true; |
12070 | return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); |
12071 | } |
12072 | |
12073 | return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG, |
12074 | SNaN, Depth); |
12075 | } |
12076 | |
12077 | // Global FP atomic instructions have a hardcoded FP mode and do not support |
12078 | // FP32 denormals, and only support v2f16 denormals. |
12079 | static bool fpModeMatchesGlobalFPAtomicMode(const AtomicRMWInst *RMW) { |
12080 | const fltSemantics &Flt = RMW->getType()->getScalarType()->getFltSemantics(); |
12081 | auto DenormMode = RMW->getParent()->getParent()->getDenormalMode(Flt); |
12082 | if (&Flt == &APFloat::IEEEsingle()) |
12083 | return DenormMode == DenormalMode::getPreserveSign(); |
12084 | return DenormMode == DenormalMode::getIEEE(); |
12085 | } |
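// A compact model of the matching rule above (a sketch; the enum and function
// names are illustrative). Per the checks, global FP atomics behave as if f32
// denormals are flushed (preserve-sign) while f64 keeps IEEE denormal
// handling, so expansion is only avoidable when the function's mode agrees:
enum class Denorm { IEEE, PreserveSign, Other };
static bool fpAtomicModeMatches(bool IsF32, Denorm Mode) {
  return IsF32 ? Mode == Denorm::PreserveSign : Mode == Denorm::IEEE;
}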
12086 | |
12087 | TargetLowering::AtomicExpansionKind |
12088 | SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { |
12089 | switch (RMW->getOperation()) { |
12090 | case AtomicRMWInst::FAdd: { |
12091 | Type *Ty = RMW->getType(); |
12092 | |
12093 | // We don't have a way to support 16-bit atomics now, so just leave them |
12094 | // as-is. |
12095 | if (Ty->isHalfTy()) |
12096 | return AtomicExpansionKind::None; |
12097 | |
12098 | if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy())) |
12099 | return AtomicExpansionKind::CmpXChg; |
12100 | |
12101 | unsigned AS = RMW->getPointerAddressSpace(); |
12102 | |
12103 | if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) && |
12104 | Subtarget->hasAtomicFaddInsts()) { |
12105 | // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe |
12106 | // floating point atomic instructions. May generate more efficient code, |
12107 | // but may not respect rounding and denormal modes, and may give incorrect |
12108 | // results for certain memory destinations. |
12109 | if (RMW->getFunction() |
12110 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") |
12111 | .getValueAsString() != "true") |
12112 | return AtomicExpansionKind::CmpXChg; |
12113 | |
12114 | if (Subtarget->hasGFX90AInsts()) { |
12115 | if (Ty->isFloatTy() && AS == AMDGPUAS::FLAT_ADDRESS) |
12116 | return AtomicExpansionKind::CmpXChg; |
12117 | |
12118 | auto SSID = RMW->getSyncScopeID(); |
12119 | if (SSID == SyncScope::System || |
12120 | SSID == RMW->getContext().getOrInsertSyncScopeID("one-as")) |
12121 | return AtomicExpansionKind::CmpXChg; |
12122 | |
12123 | return AtomicExpansionKind::None; |
12124 | } |
12125 | |
12126 | if (AS == AMDGPUAS::FLAT_ADDRESS) |
12127 | return AtomicExpansionKind::CmpXChg; |
12128 | |
12129 | return RMW->use_empty() ? AtomicExpansionKind::None |
12130 | : AtomicExpansionKind::CmpXChg; |
12131 | } |
12132 | |
12133 | // DS FP atomics do respect the denormal mode, but the rounding mode is |
12134 | // fixed to round-to-nearest-even. |
12135 | // The only exception is DS_ADD_F64 which never flushes with gfx90a. |
12136 | if (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) { |
12137 | if (!Ty->isDoubleTy()) |
12138 | return AtomicExpansionKind::None; |
12139 | |
12140 | return (fpModeMatchesGlobalFPAtomicMode(RMW) || |
12141 | RMW->getFunction() |
12142 | ->getFnAttribute("amdgpu-unsafe-fp-atomics") |
12143 | .getValueAsString() == "true") |
12144 | ? AtomicExpansionKind::None |
12145 | : AtomicExpansionKind::CmpXChg; |
12146 | } |
12147 | |
12148 | return AtomicExpansionKind::CmpXChg; |
12149 | } |
12150 | default: |
12151 | break; |
12152 | } |
12153 | |
12154 | return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW); |
12155 | } |
12156 | |
12157 | const TargetRegisterClass * |
12158 | SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { |
12159 | const TargetRegisterClass *RC = TargetLoweringBase::getRegClassFor(VT, false); |
12160 | const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); |
12161 | if (RC == &AMDGPU::VReg_1RegClass && !isDivergent) |
12162 | return Subtarget->getWavefrontSize() == 64 ? &AMDGPU::SReg_64RegClass |
12163 | : &AMDGPU::SReg_32RegClass; |
12164 | if (!TRI->isSGPRClass(RC) && !isDivergent) |
12165 | return TRI->getEquivalentSGPRClass(RC); |
12166 | else if (TRI->isSGPRClass(RC) && isDivergent) |
12167 | return TRI->getEquivalentVGPRClass(RC); |
12168 | |
12169 | return RC; |
12170 | } |
12171 | |
12172 | |
12173 | |
12174 | |
12175 | |
12176 | static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited, |
12177 | unsigned WaveSize) { |
12178 | |
12179 | |
12180 | |
12181 | IntegerType *IT = dyn_cast<IntegerType>(V->getType()); |
12182 | if (!IT || IT->getBitWidth() != WaveSize) |
12183 | return false; |
12184 | |
12185 | if (!isa<Instruction>(V)) |
12186 | return false; |
12187 | if (!Visited.insert(V).second) |
12188 | return false; |
12189 | bool Result = false; |
12190 | for (auto U : V->users()) { |
12191 | if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) { |
12192 | if (V == U->getOperand(1)) { |
12193 | switch (Intrinsic->getIntrinsicID()) { |
12194 | default: |
12195 | Result = false; |
12196 | break; |
12197 | case Intrinsic::amdgcn_if_break: |
12198 | case Intrinsic::amdgcn_if: |
12199 | case Intrinsic::amdgcn_else: |
12200 | Result = true; |
12201 | break; |
12202 | } |
12203 | } |
12204 | if (V == U->getOperand(0)) { |
12205 | switch (Intrinsic->getIntrinsicID()) { |
12206 | default: |
12207 | Result = false; |
12208 | break; |
12209 | case Intrinsic::amdgcn_end_cf: |
12210 | case Intrinsic::amdgcn_loop: |
12211 | Result = true; |
12212 | break; |
12213 | } |
12214 | } |
12215 | } else { |
12216 | Result = hasCFUser(U, Visited, WaveSize); |
12217 | } |
12218 | if (Result) |
12219 | break; |
12220 | } |
12221 | return Result; |
12222 | } |
12223 | |
12224 | bool SITargetLowering::requiresUniformRegister(MachineFunction &MF, |
12225 | const Value *V) const { |
12226 | if (const CallInst *CI = dyn_cast<CallInst>(V)) { |
12227 | if (CI->isInlineAsm()) { |
12228 | // FIXME: This cannot give a correct answer. This should only trigger in |
12229 | // the case where inline asm returns mixed SGPR and VGPR results, used |
12230 | // outside the defining block. We don't have a specific result to |
12231 | // consider, so this assumes if any value is SGPR, the overall register |
12232 | // also needs to be SGPR. |
12233 | const SIRegisterInfo *SIRI = Subtarget->getRegisterInfo(); |
12234 | TargetLowering::AsmOperandInfoVector TargetConstraints = ParseConstraints( |
12235 | MF.getDataLayout(), Subtarget->getRegisterInfo(), *CI); |
12236 | for (auto &TC : TargetConstraints) { |
12237 | if (TC.Type == InlineAsm::isOutput) { |
12238 | ComputeConstraintToUse(TC, SDValue()); |
12239 | unsigned AssignedReg; |
12240 | const TargetRegisterClass *RC; |
12241 | std::tie(AssignedReg, RC) = getRegForInlineAsmConstraint( |
12242 | SIRI, TC.ConstraintCode, TC.ConstraintVT); |
12243 | if (RC) { |
12244 | MachineRegisterInfo &MRI = MF.getRegInfo(); |
12245 | if (AssignedReg != 0 && SIRI->isSGPRReg(MRI, AssignedReg)) |
12246 | return true; |
12247 | else if (SIRI->isSGPRClass(RC)) |
12248 | return true; |
12249 | } |
12250 | } |
12251 | } |
12252 | } |
12253 | } |
12254 | SmallPtrSet<const Value *, 16> Visited; |
12255 | return hasCFUser(V, Visited, Subtarget->getWavefrontSize()); |
12256 | } |
12257 | |
12258 | std::pair<InstructionCost, MVT> |
12259 | SITargetLowering::getTypeLegalizationCost(const DataLayout &DL, |
12260 | Type *Ty) const { |
12261 | std::pair<InstructionCost, MVT> Cost = |
12262 | TargetLoweringBase::getTypeLegalizationCost(DL, Ty); |
12263 | auto Size = DL.getTypeSizeInBits(Ty); |
12264 | // Maximum load or store can handle 8 dwords for scalar and 4 for |
12265 | // vector ALU. Let's assume anything above 8 dwords is expensive |
12266 | // unless it's legal. |
12267 | if (Size <= 256) |
12268 | return Cost; |
12269 | |
12270 | Cost.first = (Size + 255) / 256; |
12271 | return Cost; |
12272 | } |
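// The adjustment above is a ceiling division by the 256-bit chunk size (a
// sketch; numRegChunks is an illustrative name):
static unsigned numRegChunks(unsigned SizeInBits) {
  return (SizeInBits + 255) / 256; // e.g. 1024 bits -> cost 4
}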