clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CGBuiltin.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/llvm/include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/../include -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -I /usr/src/gnu/usr.bin/clang/libclangCodeGen/obj/../include -D NDEBUG -D __STDC_LIMIT_MACROS -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D LLVM_PREFIX="/usr" -internal-isystem /usr/include/c++/v1 -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/usr/src/gnu/usr.bin/clang/libclangCodeGen/obj -ferror-limit 19 -fvisibility-inlines-hidden -fwrapv -stack-protector 2 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c++ /usr/src/gnu/usr.bin/clang/libclangCodeGen/../../../llvm/clang/lib/CodeGen/CGBuiltin.cpp
1 | |
2 | |
3 | |
4 | |
5 | |
6 | |
7 | |
8 | |
9 | |
10 | |
11 | |
12 | |
13 | #include "CGCUDARuntime.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CGObjCRuntime.h" |
16 | #include "CGOpenCLRuntime.h" |
17 | #include "CGRecordLayout.h" |
18 | #include "CodeGenFunction.h" |
19 | #include "CodeGenModule.h" |
20 | #include "ConstantEmitter.h" |
21 | #include "PatternInit.h" |
22 | #include "TargetInfo.h" |
23 | #include "clang/AST/ASTContext.h" |
24 | #include "clang/AST/Attr.h" |
25 | #include "clang/AST/Decl.h" |
26 | #include "clang/AST/OSLog.h" |
27 | #include "clang/Basic/TargetBuiltins.h" |
28 | #include "clang/Basic/TargetInfo.h" |
29 | #include "clang/CodeGen/CGFunctionInfo.h" |
30 | #include "llvm/ADT/APFloat.h" |
31 | #include "llvm/ADT/APInt.h" |
32 | #include "llvm/ADT/SmallPtrSet.h" |
33 | #include "llvm/ADT/StringExtras.h" |
34 | #include "llvm/Analysis/ValueTracking.h" |
35 | #include "llvm/IR/DataLayout.h" |
36 | #include "llvm/IR/InlineAsm.h" |
37 | #include "llvm/IR/Intrinsics.h" |
38 | #include "llvm/IR/IntrinsicsAArch64.h" |
39 | #include "llvm/IR/IntrinsicsAMDGPU.h" |
40 | #include "llvm/IR/IntrinsicsARM.h" |
41 | #include "llvm/IR/IntrinsicsBPF.h" |
42 | #include "llvm/IR/IntrinsicsHexagon.h" |
43 | #include "llvm/IR/IntrinsicsNVPTX.h" |
44 | #include "llvm/IR/IntrinsicsPowerPC.h" |
45 | #include "llvm/IR/IntrinsicsR600.h" |
46 | #include "llvm/IR/IntrinsicsRISCV.h" |
47 | #include "llvm/IR/IntrinsicsS390.h" |
48 | #include "llvm/IR/IntrinsicsWebAssembly.h" |
49 | #include "llvm/IR/IntrinsicsX86.h" |
50 | #include "llvm/IR/MDBuilder.h" |
51 | #include "llvm/IR/MatrixBuilder.h" |
52 | #include "llvm/Support/ConvertUTF.h" |
53 | #include "llvm/Support/ScopedPrinter.h" |
54 | #include "llvm/Support/X86TargetParser.h" |
55 | #include <sstream> |
56 | |
57 | using namespace clang; |
58 | using namespace CodeGen; |
59 | using namespace llvm; |
60 | |
61 | static |
62 | int64_t clamp(int64_t Value, int64_t Low, int64_t High) { |
63 | return std::min(High, std::max(Low, Value)); |
64 | } |
65 | |
66 | static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, |
67 | Align AlignmentInBytes) { |
68 | ConstantInt *Byte; |
69 | switch (CGF.getLangOpts().getTrivialAutoVarInit()) { |
70 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
71 | |
72 | return; |
73 | case LangOptions::TrivialAutoVarInitKind::Zero: |
74 | Byte = CGF.Builder.getInt8(0x00); |
75 | break; |
76 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
77 | llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext()); |
78 | Byte = llvm::dyn_cast<llvm::ConstantInt>( |
79 | initializationPatternFor(CGF.CGM, Int8)); |
80 | break; |
81 | } |
82 | } |
83 | if (CGF.CGM.stopAutoInit()) |
84 | return; |
85 | auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes); |
86 | I->addAnnotationMetadata("auto-init"); |
87 | } |
88 | |
89 | |
90 | |
91 | llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD, |
92 | unsigned BuiltinID) { |
93 | assert(Context.BuiltinInfo.isLibFunction(BuiltinID)); |
94 | |
95 | |
96 | StringRef Name; |
97 | GlobalDecl D(FD); |
98 | |
99 | |
100 | |
101 | |
102 | if (FD->hasAttr<AsmLabelAttr>()) |
103 | Name = getMangledName(D); |
104 | else |
105 | Name = Context.BuiltinInfo.getName(BuiltinID) + 10; |
106 | |
107 | llvm::FunctionType *Ty = |
108 | cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); |
109 | |
110 | return GetOrCreateLLVMFunction(Name, Ty, D, false); |
111 | } |
112 | |
113 | |
114 | |
115 | static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, |
116 | QualType T, llvm::IntegerType *IntType) { |
117 | V = CGF.EmitToMemory(V, T); |
118 | |
119 | if (V->getType()->isPointerTy()) |
120 | return CGF.Builder.CreatePtrToInt(V, IntType); |
121 | |
122 | assert(V->getType() == IntType); |
123 | return V; |
124 | } |
125 | |
126 | static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, |
127 | QualType T, llvm::Type *ResultType) { |
128 | V = CGF.EmitFromMemory(V, T); |
129 | |
130 | if (ResultType->isPointerTy()) |
131 | return CGF.Builder.CreateIntToPtr(V, ResultType); |
132 | |
133 | assert(V->getType() == ResultType); |
134 | return V; |
135 | } |
136 | |
137 | |
138 | |
139 | static Value *MakeBinaryAtomicValue( |
140 | CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, |
141 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
142 | QualType T = E->getType(); |
143 | assert(E->getArg(0)->getType()->isPointerType()); |
144 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
145 | E->getArg(0)->getType()->getPointeeType())); |
146 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
147 | |
148 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
149 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
150 | |
151 | llvm::IntegerType *IntType = |
152 | llvm::IntegerType::get(CGF.getLLVMContext(), |
153 | CGF.getContext().getTypeSize(T)); |
154 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
155 | |
156 | llvm::Value *Args[2]; |
157 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
158 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
159 | llvm::Type *ValueType = Args[1]->getType(); |
160 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
161 | |
162 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
163 | Kind, Args[0], Args[1], Ordering); |
164 | return EmitFromInt(CGF, Result, T, ValueType); |
165 | } |
166 | |
167 | static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { |
168 | Value *Val = CGF.EmitScalarExpr(E->getArg(0)); |
169 | Value *Address = CGF.EmitScalarExpr(E->getArg(1)); |
170 | |
171 | |
172 | Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); |
173 | Value *BC = CGF.Builder.CreateBitCast( |
174 | Address, llvm::PointerType::getUnqual(Val->getType()), "cast"); |
175 | LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType()); |
176 | LV.setNontemporal(true); |
177 | CGF.EmitStoreOfScalar(Val, LV, false); |
178 | return nullptr; |
179 | } |
180 | |
181 | static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { |
182 | Value *Address = CGF.EmitScalarExpr(E->getArg(0)); |
183 | |
184 | LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType()); |
185 | LV.setNontemporal(true); |
186 | return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); |
187 | } |
188 | |
189 | static RValue EmitBinaryAtomic(CodeGenFunction &CGF, |
190 | llvm::AtomicRMWInst::BinOp Kind, |
191 | const CallExpr *E) { |
192 | return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E)); |
193 | } |
194 | |
195 | |
196 | |
197 | |
198 | static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, |
199 | llvm::AtomicRMWInst::BinOp Kind, |
200 | const CallExpr *E, |
201 | Instruction::BinaryOps Op, |
202 | bool Invert = false) { |
203 | QualType T = E->getType(); |
204 | assert(E->getArg(0)->getType()->isPointerType()); |
205 | assert(CGF.getContext().hasSameUnqualifiedType(T, |
206 | E->getArg(0)->getType()->getPointeeType())); |
207 | assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); |
208 | |
209 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
210 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
211 | |
212 | llvm::IntegerType *IntType = |
213 | llvm::IntegerType::get(CGF.getLLVMContext(), |
214 | CGF.getContext().getTypeSize(T)); |
215 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
216 | |
217 | llvm::Value *Args[2]; |
218 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
219 | llvm::Type *ValueType = Args[1]->getType(); |
220 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
221 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
222 | |
223 | llvm::Value *Result = CGF.Builder.CreateAtomicRMW( |
224 | Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent); |
225 | Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]); |
226 | if (Invert) |
227 | Result = |
228 | CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result, |
229 | llvm::ConstantInt::getAllOnesValue(IntType)); |
230 | Result = EmitFromInt(CGF, Result, T, ValueType); |
231 | return RValue::get(Result); |
232 | } |
233 | |
234 | |
235 | |
236 | |
237 | |
238 | |
239 | |
240 | |
241 | |
242 | |
243 | |
244 | |
245 | |
246 | |
247 | |
248 | static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, |
249 | bool ReturnBool) { |
250 | QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); |
251 | llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); |
252 | unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); |
253 | |
254 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
255 | CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); |
256 | llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); |
257 | |
258 | Value *Args[3]; |
259 | Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType); |
260 | Args[1] = CGF.EmitScalarExpr(E->getArg(1)); |
261 | llvm::Type *ValueType = Args[1]->getType(); |
262 | Args[1] = EmitToInt(CGF, Args[1], T, IntType); |
263 | Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); |
264 | |
265 | Value *Pair = CGF.Builder.CreateAtomicCmpXchg( |
266 | Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent, |
267 | llvm::AtomicOrdering::SequentiallyConsistent); |
268 | if (ReturnBool) |
269 | |
270 | return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1), |
271 | CGF.ConvertType(E->getType())); |
272 | else |
273 | |
274 | return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T, |
275 | ValueType); |
276 | } |
277 | |
278 | |
279 | |
280 | |
281 | |
282 | |
283 | |
284 | |
285 | |
286 | |
287 | |
288 | |
289 | |
290 | |
291 | static |
292 | Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, |
293 | AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { |
294 | assert(E->getArg(0)->getType()->isPointerType()); |
295 | assert(CGF.getContext().hasSameUnqualifiedType( |
296 | E->getType(), E->getArg(0)->getType()->getPointeeType())); |
297 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
298 | E->getArg(1)->getType())); |
299 | assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), |
300 | E->getArg(2)->getType())); |
301 | |
302 | auto *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
303 | auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); |
304 | auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); |
305 | |
306 | |
307 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? |
308 | AtomicOrdering::Monotonic : |
309 | SuccessOrdering; |
310 | |
311 | |
312 | |
313 | |
314 | |
315 | auto *Result = CGF.Builder.CreateAtomicCmpXchg( |
316 | Destination, Comparand, Exchange, |
317 | SuccessOrdering, FailureOrdering); |
318 | Result->setVolatile(true); |
319 | return CGF.Builder.CreateExtractValue(Result, 0); |
320 | } |
321 | |
322 | |
323 | |
324 | |
325 | |
326 | |
327 | |
328 | |
329 | |
330 | static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, |
331 | const CallExpr *E, |
332 | AtomicOrdering SuccessOrdering) { |
333 | assert(E->getNumArgs() == 4); |
334 | llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0)); |
335 | llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1)); |
336 | llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2)); |
337 | llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3)); |
338 | |
339 | assert(Destination->getType()->isPointerTy()); |
340 | assert(!ExchangeHigh->getType()->isPointerTy()); |
341 | assert(!ExchangeLow->getType()->isPointerTy()); |
342 | assert(ComparandPtr->getType()->isPointerTy()); |
343 | |
344 | |
345 | auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release |
346 | ? AtomicOrdering::Monotonic |
347 | : SuccessOrdering; |
348 | |
349 | |
350 | llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128); |
351 | llvm::Type *Int128PtrTy = Int128Ty->getPointerTo(); |
352 | Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy); |
353 | Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy), |
354 | CGF.getContext().toCharUnitsFromBits(128)); |
355 | |
356 | |
357 | ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty); |
358 | ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty); |
359 | ExchangeHigh = |
360 | CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64)); |
361 | llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow); |
362 | |
363 | |
364 | llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult); |
365 | |
366 | auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
367 | SuccessOrdering, FailureOrdering); |
368 | |
369 | |
370 | |
371 | |
372 | |
373 | CXI->setVolatile(true); |
374 | |
375 | |
376 | CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0), |
377 | ComparandResult); |
378 | |
379 | |
380 | Value *Success = CGF.Builder.CreateExtractValue(CXI, 1); |
381 | return CGF.Builder.CreateZExt(Success, CGF.Int8Ty); |
382 | } |
383 | |
384 | static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, |
385 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
386 | assert(E->getArg(0)->getType()->isPointerType()); |
387 | |
388 | auto *IntTy = CGF.ConvertType(E->getType()); |
389 | auto *Result = CGF.Builder.CreateAtomicRMW( |
390 | AtomicRMWInst::Add, |
391 | CGF.EmitScalarExpr(E->getArg(0)), |
392 | ConstantInt::get(IntTy, 1), |
393 | Ordering); |
394 | return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); |
395 | } |
396 | |
397 | static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, |
398 | AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { |
399 | assert(E->getArg(0)->getType()->isPointerType()); |
400 | |
401 | auto *IntTy = CGF.ConvertType(E->getType()); |
402 | auto *Result = CGF.Builder.CreateAtomicRMW( |
403 | AtomicRMWInst::Sub, |
404 | CGF.EmitScalarExpr(E->getArg(0)), |
405 | ConstantInt::get(IntTy, 1), |
406 | Ordering); |
407 | return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); |
408 | } |
409 | |
410 | |
411 | static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { |
412 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
413 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
414 | CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy); |
415 | llvm::Type *ITy = |
416 | llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8); |
417 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
418 | llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize); |
419 | Load->setVolatile(true); |
420 | return Load; |
421 | } |
422 | |
423 | |
424 | static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { |
425 | Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); |
426 | Value *Value = CGF.EmitScalarExpr(E->getArg(1)); |
427 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
428 | CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy); |
429 | llvm::Type *ITy = |
430 | llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8); |
431 | Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
432 | llvm::StoreInst *Store = |
433 | CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize); |
434 | Store->setVolatile(true); |
435 | return Store; |
436 | } |
437 | |
438 | |
439 | |
440 | |
441 | static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
442 | const CallExpr *E, unsigned IntrinsicID, |
443 | unsigned ConstrainedIntrinsicID) { |
444 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
445 | |
446 | if (CGF.Builder.getIsFPConstrained()) { |
447 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
448 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
449 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); |
450 | } else { |
451 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
452 | return CGF.Builder.CreateCall(F, Src0); |
453 | } |
454 | } |
455 | |
456 | |
457 | |
458 | static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
459 | const CallExpr *E, unsigned IntrinsicID, |
460 | unsigned ConstrainedIntrinsicID) { |
461 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
462 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
463 | |
464 | if (CGF.Builder.getIsFPConstrained()) { |
465 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
466 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
467 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); |
468 | } else { |
469 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
470 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
471 | } |
472 | } |
473 | |
474 | |
475 | |
476 | static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
477 | const CallExpr *E, unsigned IntrinsicID, |
478 | unsigned ConstrainedIntrinsicID) { |
479 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
480 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
481 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
482 | |
483 | if (CGF.Builder.getIsFPConstrained()) { |
484 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
485 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); |
486 | return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 }); |
487 | } else { |
488 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
489 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
490 | } |
491 | } |
492 | |
493 | |
494 | |
495 | static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, |
496 | unsigned IntrinsicID, |
497 | unsigned ConstrainedIntrinsicID, |
498 | llvm::Type *Ty, |
499 | ArrayRef<Value *> Args) { |
500 | Function *F; |
501 | if (CGF.Builder.getIsFPConstrained()) |
502 | F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty); |
503 | else |
504 | F = CGF.CGM.getIntrinsic(IntrinsicID, Ty); |
505 | |
506 | if (CGF.Builder.getIsFPConstrained()) |
507 | return CGF.Builder.CreateConstrainedFPCall(F, Args); |
508 | else |
509 | return CGF.Builder.CreateCall(F, Args); |
510 | } |
511 | |
512 | |
513 | |
514 | static Value *emitUnaryBuiltin(CodeGenFunction &CGF, |
515 | const CallExpr *E, |
516 | unsigned IntrinsicID) { |
517 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
518 | |
519 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
520 | return CGF.Builder.CreateCall(F, Src0); |
521 | } |
522 | |
523 | |
524 | static Value *emitBinaryBuiltin(CodeGenFunction &CGF, |
525 | const CallExpr *E, |
526 | unsigned IntrinsicID) { |
527 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
528 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
529 | |
530 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
531 | return CGF.Builder.CreateCall(F, { Src0, Src1 }); |
532 | } |
533 | |
534 | |
535 | static Value *emitTernaryBuiltin(CodeGenFunction &CGF, |
536 | const CallExpr *E, |
537 | unsigned IntrinsicID) { |
538 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
539 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
540 | llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); |
541 | |
542 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
543 | return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); |
544 | } |
545 | |
546 | |
547 | static Value *emitFPIntBuiltin(CodeGenFunction &CGF, |
548 | const CallExpr *E, |
549 | unsigned IntrinsicID) { |
550 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
551 | llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); |
552 | |
553 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); |
554 | return CGF.Builder.CreateCall(F, {Src0, Src1}); |
555 | } |
556 | |
557 | |
558 | static Value * |
559 | emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, |
560 | unsigned IntrinsicID, |
561 | unsigned ConstrainedIntrinsicID) { |
562 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
563 | llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); |
564 | |
565 | if (CGF.Builder.getIsFPConstrained()) { |
566 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
567 | Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, |
568 | {ResultType, Src0->getType()}); |
569 | return CGF.Builder.CreateConstrainedFPCall(F, {Src0}); |
570 | } else { |
571 | Function *F = |
572 | CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()}); |
573 | return CGF.Builder.CreateCall(F, Src0); |
574 | } |
575 | } |
576 | |
577 | |
578 | static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { |
579 | Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); |
580 | llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); |
581 | Call->setDoesNotAccessMemory(); |
582 | return Call; |
583 | } |
584 | |
585 | |
586 | |
587 | static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { |
588 | LLVMContext &C = CGF.CGM.getLLVMContext(); |
589 | |
590 | llvm::Type *Ty = V->getType(); |
591 | int Width = Ty->getPrimitiveSizeInBits(); |
592 | llvm::Type *IntTy = llvm::IntegerType::get(C, Width); |
593 | V = CGF.Builder.CreateBitCast(V, IntTy); |
594 | if (Ty->isPPC_FP128Ty()) { |
595 | |
596 | |
597 | |
598 | |
599 | |
600 | |
601 | |
602 | Width >>= 1; |
603 | if (CGF.getTarget().isBigEndian()) { |
604 | Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width); |
605 | V = CGF.Builder.CreateLShr(V, ShiftCst); |
606 | } |
607 | |
608 | |
609 | IntTy = llvm::IntegerType::get(C, Width); |
610 | V = CGF.Builder.CreateTrunc(V, IntTy); |
611 | } |
612 | Value *Zero = llvm::Constant::getNullValue(IntTy); |
613 | return CGF.Builder.CreateICmpSLT(V, Zero); |
614 | } |
615 | |
616 | static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD, |
617 | const CallExpr *E, llvm::Constant *calleeValue) { |
618 | CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD)); |
619 | return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); |
620 | } |
621 | |
622 | |
623 | |
624 | |
625 | |
626 | |
627 | |
628 | |
629 | |
630 | |
631 | static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, |
632 | const llvm::Intrinsic::ID IntrinsicID, |
633 | llvm::Value *X, llvm::Value *Y, |
634 | llvm::Value *&Carry) { |
635 | |
636 | assert(X->getType() == Y->getType() && |
637 | "Arguments must be the same type. (Did you forget to make sure both " |
638 | "arguments have the same integer width?)"); |
639 | |
640 | Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); |
641 | llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); |
642 | Carry = CGF.Builder.CreateExtractValue(Tmp, 1); |
643 | return CGF.Builder.CreateExtractValue(Tmp, 0); |
644 | } |
645 | |
646 | static Value *emitRangedBuiltin(CodeGenFunction &CGF, |
647 | unsigned IntrinsicID, |
648 | int low, int high) { |
649 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
650 | llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); |
651 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); |
652 | llvm::Instruction *Call = CGF.Builder.CreateCall(F); |
653 | Call->setMetadata(llvm::LLVMContext::MD_range, RNode); |
654 | return Call; |
655 | } |
656 | |
657 | namespace { |
658 | struct WidthAndSignedness { |
659 | unsigned Width; |
660 | bool Signed; |
661 | }; |
662 | } |
663 | |
664 | static WidthAndSignedness |
665 | getIntegerWidthAndSignedness(const clang::ASTContext &context, |
666 | const clang::QualType Type) { |
667 | assert(Type->isIntegerType() && "Given type is not an integer."); |
668 | unsigned Width = Type->isBooleanType() ? 1 |
669 | : Type->isExtIntType() ? context.getIntWidth(Type) |
670 | : context.getTypeInfo(Type).Width; |
671 | bool Signed = Type->isSignedIntegerType(); |
672 | return {Width, Signed}; |
673 | } |
674 | |
675 | |
676 | |
677 | |
678 | static struct WidthAndSignedness |
679 | EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { |
680 | assert(Types.size() > 0 && "Empty list of types."); |
681 | |
682 | |
683 | bool Signed = false; |
684 | for (const auto &Type : Types) { |
685 | Signed |= Type.Signed; |
686 | } |
687 | |
688 | |
689 | |
690 | |
691 | |
692 | unsigned Width = 0; |
693 | for (const auto &Type : Types) { |
694 | unsigned MinWidth = Type.Width + (Signed && !Type.Signed); |
695 | if (Width < MinWidth) { |
696 | Width = MinWidth; |
697 | } |
698 | } |
699 | |
700 | return {Width, Signed}; |
701 | } |
702 | |
703 | Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { |
704 | llvm::Type *DestType = Int8PtrTy; |
705 | if (ArgValue->getType() != DestType) |
706 | ArgValue = |
707 | Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data()); |
708 | |
709 | Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend; |
710 | return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue); |
711 | } |
712 | |
713 | |
714 | |
715 | static bool areBOSTypesCompatible(int From, int To) { |
716 | |
717 | |
718 | |
719 | return From == To || (From == 0 && To == 1) || (From == 3 && To == 2); |
720 | } |
721 | |
722 | static llvm::Value * |
723 | getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { |
724 | return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, true); |
725 | } |
726 | |
727 | llvm::Value * |
728 | CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, |
729 | llvm::IntegerType *ResType, |
730 | llvm::Value *EmittedE, |
731 | bool IsDynamic) { |
732 | uint64_t ObjectSize; |
733 | if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) |
734 | return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); |
735 | return ConstantInt::get(ResType, ObjectSize, true); |
736 | } |
737 | |
738 | |
739 | |
740 | |
741 | |
742 | |
743 | |
744 | |
745 | |
746 | |
747 | llvm::Value * |
748 | CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, |
749 | llvm::IntegerType *ResType, |
750 | llvm::Value *EmittedE, bool IsDynamic) { |
751 | |
752 | |
753 | if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { |
754 | auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); |
755 | auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); |
756 | if (Param != nullptr && PS != nullptr && |
757 | areBOSTypesCompatible(PS->getType(), Type)) { |
758 | auto Iter = SizeArguments.find(Param); |
759 | assert(Iter != SizeArguments.end()); |
760 | |
761 | const ImplicitParamDecl *D = Iter->second; |
762 | auto DIter = LocalDeclMap.find(D); |
763 | assert(DIter != LocalDeclMap.end()); |
764 | |
765 | return EmitLoadOfScalar(DIter->second, false, |
766 | getContext().getSizeType(), E->getBeginLoc()); |
767 | } |
768 | } |
769 | |
770 | |
771 | |
772 | |
773 | if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) |
774 | return getDefaultBuiltinObjectSizeResult(Type, ResType); |
775 | |
776 | Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E); |
777 | assert(Ptr->getType()->isPointerTy() && |
778 | "Non-pointer passed to __builtin_object_size?"); |
779 | |
780 | Function *F = |
781 | CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); |
782 | |
783 | |
784 | Value *Min = Builder.getInt1((Type & 2) != 0); |
785 | |
786 | Value *NullIsUnknown = Builder.getTrue(); |
787 | Value *Dynamic = Builder.getInt1(IsDynamic); |
788 | return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}); |
789 | } |
790 | |
791 | namespace { |
792 | |
793 | struct BitTest { |
794 | enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set }; |
795 | enum InterlockingKind : uint8_t { |
796 | Unlocked, |
797 | Sequential, |
798 | Acquire, |
799 | Release, |
800 | NoFence |
801 | }; |
802 | |
803 | ActionKind Action; |
804 | InterlockingKind Interlocking; |
805 | bool Is64Bit; |
806 | |
807 | static BitTest decodeBitTestBuiltin(unsigned BuiltinID); |
808 | }; |
809 | } |
810 | |
811 | BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { |
812 | switch (BuiltinID) { |
813 | |
814 | case Builtin::BI_bittest: |
815 | return {TestOnly, Unlocked, false}; |
816 | case Builtin::BI_bittestandcomplement: |
817 | return {Complement, Unlocked, false}; |
818 | case Builtin::BI_bittestandreset: |
819 | return {Reset, Unlocked, false}; |
820 | case Builtin::BI_bittestandset: |
821 | return {Set, Unlocked, false}; |
822 | case Builtin::BI_interlockedbittestandreset: |
823 | return {Reset, Sequential, false}; |
824 | case Builtin::BI_interlockedbittestandset: |
825 | return {Set, Sequential, false}; |
826 | |
827 | |
828 | case Builtin::BI_bittest64: |
829 | return {TestOnly, Unlocked, true}; |
830 | case Builtin::BI_bittestandcomplement64: |
831 | return {Complement, Unlocked, true}; |
832 | case Builtin::BI_bittestandreset64: |
833 | return {Reset, Unlocked, true}; |
834 | case Builtin::BI_bittestandset64: |
835 | return {Set, Unlocked, true}; |
836 | case Builtin::BI_interlockedbittestandreset64: |
837 | return {Reset, Sequential, true}; |
838 | case Builtin::BI_interlockedbittestandset64: |
839 | return {Set, Sequential, true}; |
840 | |
841 | |
842 | case Builtin::BI_interlockedbittestandset_acq: |
843 | return {Set, Acquire, false}; |
844 | case Builtin::BI_interlockedbittestandset_rel: |
845 | return {Set, Release, false}; |
846 | case Builtin::BI_interlockedbittestandset_nf: |
847 | return {Set, NoFence, false}; |
848 | case Builtin::BI_interlockedbittestandreset_acq: |
849 | return {Reset, Acquire, false}; |
850 | case Builtin::BI_interlockedbittestandreset_rel: |
851 | return {Reset, Release, false}; |
852 | case Builtin::BI_interlockedbittestandreset_nf: |
853 | return {Reset, NoFence, false}; |
854 | } |
855 | llvm_unreachable("expected only bittest intrinsics"); |
856 | } |
857 | |
858 | static char bitActionToX86BTCode(BitTest::ActionKind A) { |
859 | switch (A) { |
860 | case BitTest::TestOnly: return '\0'; |
861 | case BitTest::Complement: return 'c'; |
862 | case BitTest::Reset: return 'r'; |
863 | case BitTest::Set: return 's'; |
864 | } |
865 | llvm_unreachable("invalid action"); |
866 | } |
867 | |
868 | static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, |
869 | BitTest BT, |
870 | const CallExpr *E, Value *BitBase, |
871 | Value *BitPos) { |
872 | char Action = bitActionToX86BTCode(BT.Action); |
873 | char SizeSuffix = BT.Is64Bit ? 'q' : 'l'; |
874 | |
875 | |
876 | SmallString<64> Asm; |
877 | raw_svector_ostream AsmOS(Asm); |
878 | if (BT.Interlocking != BitTest::Unlocked) |
879 | AsmOS << "lock "; |
880 | AsmOS << "bt"; |
881 | if (Action) |
882 | AsmOS << Action; |
883 | AsmOS << SizeSuffix << " $2, ($1)"; |
884 | |
885 | |
886 | std::string Constraints = "={@ccc},r,r,~{cc},~{memory}"; |
887 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
888 | if (!MachineClobbers.empty()) { |
889 | Constraints += ','; |
890 | Constraints += MachineClobbers; |
891 | } |
892 | llvm::IntegerType *IntType = llvm::IntegerType::get( |
893 | CGF.getLLVMContext(), |
894 | CGF.getContext().getTypeSize(E->getArg(1)->getType())); |
895 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
896 | llvm::FunctionType *FTy = |
897 | llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false); |
898 | |
899 | llvm::InlineAsm *IA = |
900 | llvm::InlineAsm::get(FTy, Asm, Constraints, true); |
901 | return CGF.Builder.CreateCall(IA, {BitBase, BitPos}); |
902 | } |
903 | |
904 | static llvm::AtomicOrdering |
905 | getBitTestAtomicOrdering(BitTest::InterlockingKind I) { |
906 | switch (I) { |
907 | case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; |
908 | case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; |
909 | case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; |
910 | case BitTest::Release: return llvm::AtomicOrdering::Release; |
911 | case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; |
912 | } |
913 | llvm_unreachable("invalid interlocking"); |
914 | } |
915 | |
916 | |
917 | |
918 | |
919 | |
920 | static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF, |
921 | unsigned BuiltinID, |
922 | const CallExpr *E) { |
923 | Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); |
924 | Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); |
925 | |
926 | BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID); |
927 | |
928 | |
929 | |
930 | if (CGF.getTarget().getTriple().isX86()) |
931 | return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos); |
932 | |
933 | |
934 | |
935 | |
936 | |
937 | Value *ByteIndex = CGF.Builder.CreateAShr( |
938 | BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); |
939 | Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy); |
940 | Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8, |
941 | ByteIndex, "bittest.byteaddr"), |
942 | CharUnits::One()); |
943 | Value *PosLow = |
944 | CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty), |
945 | llvm::ConstantInt::get(CGF.Int8Ty, 0x7)); |
946 | |
947 | |
948 | Value *Mask = nullptr; |
949 | if (BT.Action != BitTest::TestOnly) { |
950 | Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow, |
951 | "bittest.mask"); |
952 | } |
953 | |
954 | |
955 | llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking); |
956 | |
957 | Value *OldByte = nullptr; |
958 | if (Ordering != llvm::AtomicOrdering::NotAtomic) { |
959 | |
960 | |
961 | llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or; |
962 | if (BT.Action == BitTest::Reset) { |
963 | Mask = CGF.Builder.CreateNot(Mask); |
964 | RMWOp = llvm::AtomicRMWInst::And; |
965 | } |
966 | OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask, |
967 | Ordering); |
968 | } else { |
969 | |
970 | OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte"); |
971 | Value *NewByte = nullptr; |
972 | switch (BT.Action) { |
973 | case BitTest::TestOnly: |
974 | |
975 | break; |
976 | case BitTest::Complement: |
977 | NewByte = CGF.Builder.CreateXor(OldByte, Mask); |
978 | break; |
979 | case BitTest::Reset: |
980 | NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask)); |
981 | break; |
982 | case BitTest::Set: |
983 | NewByte = CGF.Builder.CreateOr(OldByte, Mask); |
984 | break; |
985 | } |
986 | if (NewByte) |
987 | CGF.Builder.CreateStore(NewByte, ByteAddr); |
988 | } |
989 | |
990 | |
991 | |
992 | Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr"); |
993 | return CGF.Builder.CreateAnd( |
994 | ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res"); |
995 | } |
996 | |
997 | static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF, |
998 | unsigned BuiltinID, |
999 | const CallExpr *E) { |
1000 | Value *Addr = CGF.EmitScalarExpr(E->getArg(0)); |
1001 | |
1002 | SmallString<64> Asm; |
1003 | raw_svector_ostream AsmOS(Asm); |
1004 | llvm::IntegerType *RetType = CGF.Int32Ty; |
1005 | |
1006 | switch (BuiltinID) { |
1007 | case clang::PPC::BI__builtin_ppc_ldarx: |
1008 | AsmOS << "ldarx "; |
1009 | RetType = CGF.Int64Ty; |
1010 | break; |
1011 | case clang::PPC::BI__builtin_ppc_lwarx: |
1012 | AsmOS << "lwarx "; |
1013 | RetType = CGF.Int32Ty; |
1014 | break; |
1015 | case clang::PPC::BI__builtin_ppc_lharx: |
1016 | AsmOS << "lharx "; |
1017 | RetType = CGF.Int16Ty; |
1018 | break; |
1019 | case clang::PPC::BI__builtin_ppc_lbarx: |
1020 | AsmOS << "lbarx "; |
1021 | RetType = CGF.Int8Ty; |
1022 | break; |
1023 | default: |
1024 | llvm_unreachable("Expected only PowerPC load reserve intrinsics"); |
1025 | } |
1026 | |
1027 | AsmOS << "$0, ${1:y}"; |
1028 | |
1029 | std::string Constraints = "=r,*Z,~{memory}"; |
1030 | std::string MachineClobbers = CGF.getTarget().getClobbers(); |
1031 | if (!MachineClobbers.empty()) { |
1032 | Constraints += ','; |
1033 | Constraints += MachineClobbers; |
1034 | } |
1035 | |
1036 | llvm::Type *IntPtrType = RetType->getPointerTo(); |
1037 | llvm::FunctionType *FTy = |
1038 | llvm::FunctionType::get(RetType, {IntPtrType}, false); |
1039 | |
1040 | llvm::InlineAsm *IA = |
1041 | llvm::InlineAsm::get(FTy, Asm, Constraints, true); |
1042 | return CGF.Builder.CreateCall(IA, {Addr}); |
1043 | } |
1044 | |
1045 | namespace { |
1046 | enum class MSVCSetJmpKind { |
1047 | _setjmpex, |
1048 | _setjmp3, |
1049 | _setjmp |
1050 | }; |
1051 | } |
1052 | |
1053 | |
1054 | |
1055 | |
1056 | static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, |
1057 | const CallExpr *E) { |
1058 | llvm::Value *Arg1 = nullptr; |
1059 | llvm::Type *Arg1Ty = nullptr; |
1060 | StringRef Name; |
1061 | bool IsVarArg = false; |
1062 | if (SJKind == MSVCSetJmpKind::_setjmp3) { |
1063 | Name = "_setjmp3"; |
1064 | Arg1Ty = CGF.Int32Ty; |
1065 | Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0); |
1066 | IsVarArg = true; |
1067 | } else { |
1068 | Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex"; |
1069 | Arg1Ty = CGF.Int8PtrTy; |
1070 | if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) { |
1071 | Arg1 = CGF.Builder.CreateCall( |
1072 | CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy)); |
1073 | } else |
1074 | Arg1 = CGF.Builder.CreateCall( |
1075 | CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy), |
1076 | llvm::ConstantInt::get(CGF.Int32Ty, 0)); |
1077 | } |
1078 | |
1079 | |
1080 | llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty}; |
1081 | llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get( |
1082 | CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1083 | llvm::Attribute::ReturnsTwice); |
1084 | llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction( |
1085 | llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name, |
1086 | ReturnsTwiceAttr, true); |
1087 | |
1088 | llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast( |
1089 | CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); |
1090 | llvm::Value *Args[] = {Buf, Arg1}; |
1091 | llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args); |
1092 | CB->setAttributes(ReturnsTwiceAttr); |
1093 | return RValue::get(CB); |
1094 | } |
1095 | |
1096 | |
1097 | |
1098 | enum class CodeGenFunction::MSVCIntrin { |
1099 | _BitScanForward, |
1100 | _BitScanReverse, |
1101 | _InterlockedAnd, |
1102 | _InterlockedDecrement, |
1103 | _InterlockedExchange, |
1104 | _InterlockedExchangeAdd, |
1105 | _InterlockedExchangeSub, |
1106 | _InterlockedIncrement, |
1107 | _InterlockedOr, |
1108 | _InterlockedXor, |
1109 | _InterlockedExchangeAdd_acq, |
1110 | _InterlockedExchangeAdd_rel, |
1111 | _InterlockedExchangeAdd_nf, |
1112 | _InterlockedExchange_acq, |
1113 | _InterlockedExchange_rel, |
1114 | _InterlockedExchange_nf, |
1115 | _InterlockedCompareExchange_acq, |
1116 | _InterlockedCompareExchange_rel, |
1117 | _InterlockedCompareExchange_nf, |
1118 | _InterlockedCompareExchange128, |
1119 | _InterlockedCompareExchange128_acq, |
1120 | _InterlockedCompareExchange128_rel, |
1121 | _InterlockedCompareExchange128_nf, |
1122 | _InterlockedOr_acq, |
1123 | _InterlockedOr_rel, |
1124 | _InterlockedOr_nf, |
1125 | _InterlockedXor_acq, |
1126 | _InterlockedXor_rel, |
1127 | _InterlockedXor_nf, |
1128 | _InterlockedAnd_acq, |
1129 | _InterlockedAnd_rel, |
1130 | _InterlockedAnd_nf, |
1131 | _InterlockedIncrement_acq, |
1132 | _InterlockedIncrement_rel, |
1133 | _InterlockedIncrement_nf, |
1134 | _InterlockedDecrement_acq, |
1135 | _InterlockedDecrement_rel, |
1136 | _InterlockedDecrement_nf, |
1137 | __fastfail, |
1138 | }; |
1139 | |
1140 | static Optional<CodeGenFunction::MSVCIntrin> |
1141 | translateArmToMsvcIntrin(unsigned BuiltinID) { |
1142 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1143 | switch (BuiltinID) { |
1144 | default: |
1145 | return None; |
1146 | case ARM::BI_BitScanForward: |
1147 | case ARM::BI_BitScanForward64: |
1148 | return MSVCIntrin::_BitScanForward; |
1149 | case ARM::BI_BitScanReverse: |
1150 | case ARM::BI_BitScanReverse64: |
1151 | return MSVCIntrin::_BitScanReverse; |
1152 | case ARM::BI_InterlockedAnd64: |
1153 | return MSVCIntrin::_InterlockedAnd; |
1154 | case ARM::BI_InterlockedExchange64: |
1155 | return MSVCIntrin::_InterlockedExchange; |
1156 | case ARM::BI_InterlockedExchangeAdd64: |
1157 | return MSVCIntrin::_InterlockedExchangeAdd; |
1158 | case ARM::BI_InterlockedExchangeSub64: |
1159 | return MSVCIntrin::_InterlockedExchangeSub; |
1160 | case ARM::BI_InterlockedOr64: |
1161 | return MSVCIntrin::_InterlockedOr; |
1162 | case ARM::BI_InterlockedXor64: |
1163 | return MSVCIntrin::_InterlockedXor; |
1164 | case ARM::BI_InterlockedDecrement64: |
1165 | return MSVCIntrin::_InterlockedDecrement; |
1166 | case ARM::BI_InterlockedIncrement64: |
1167 | return MSVCIntrin::_InterlockedIncrement; |
1168 | case ARM::BI_InterlockedExchangeAdd8_acq: |
1169 | case ARM::BI_InterlockedExchangeAdd16_acq: |
1170 | case ARM::BI_InterlockedExchangeAdd_acq: |
1171 | case ARM::BI_InterlockedExchangeAdd64_acq: |
1172 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1173 | case ARM::BI_InterlockedExchangeAdd8_rel: |
1174 | case ARM::BI_InterlockedExchangeAdd16_rel: |
1175 | case ARM::BI_InterlockedExchangeAdd_rel: |
1176 | case ARM::BI_InterlockedExchangeAdd64_rel: |
1177 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1178 | case ARM::BI_InterlockedExchangeAdd8_nf: |
1179 | case ARM::BI_InterlockedExchangeAdd16_nf: |
1180 | case ARM::BI_InterlockedExchangeAdd_nf: |
1181 | case ARM::BI_InterlockedExchangeAdd64_nf: |
1182 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1183 | case ARM::BI_InterlockedExchange8_acq: |
1184 | case ARM::BI_InterlockedExchange16_acq: |
1185 | case ARM::BI_InterlockedExchange_acq: |
1186 | case ARM::BI_InterlockedExchange64_acq: |
1187 | return MSVCIntrin::_InterlockedExchange_acq; |
1188 | case ARM::BI_InterlockedExchange8_rel: |
1189 | case ARM::BI_InterlockedExchange16_rel: |
1190 | case ARM::BI_InterlockedExchange_rel: |
1191 | case ARM::BI_InterlockedExchange64_rel: |
1192 | return MSVCIntrin::_InterlockedExchange_rel; |
1193 | case ARM::BI_InterlockedExchange8_nf: |
1194 | case ARM::BI_InterlockedExchange16_nf: |
1195 | case ARM::BI_InterlockedExchange_nf: |
1196 | case ARM::BI_InterlockedExchange64_nf: |
1197 | return MSVCIntrin::_InterlockedExchange_nf; |
1198 | case ARM::BI_InterlockedCompareExchange8_acq: |
1199 | case ARM::BI_InterlockedCompareExchange16_acq: |
1200 | case ARM::BI_InterlockedCompareExchange_acq: |
1201 | case ARM::BI_InterlockedCompareExchange64_acq: |
1202 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1203 | case ARM::BI_InterlockedCompareExchange8_rel: |
1204 | case ARM::BI_InterlockedCompareExchange16_rel: |
1205 | case ARM::BI_InterlockedCompareExchange_rel: |
1206 | case ARM::BI_InterlockedCompareExchange64_rel: |
1207 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1208 | case ARM::BI_InterlockedCompareExchange8_nf: |
1209 | case ARM::BI_InterlockedCompareExchange16_nf: |
1210 | case ARM::BI_InterlockedCompareExchange_nf: |
1211 | case ARM::BI_InterlockedCompareExchange64_nf: |
1212 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1213 | case ARM::BI_InterlockedOr8_acq: |
1214 | case ARM::BI_InterlockedOr16_acq: |
1215 | case ARM::BI_InterlockedOr_acq: |
1216 | case ARM::BI_InterlockedOr64_acq: |
1217 | return MSVCIntrin::_InterlockedOr_acq; |
1218 | case ARM::BI_InterlockedOr8_rel: |
1219 | case ARM::BI_InterlockedOr16_rel: |
1220 | case ARM::BI_InterlockedOr_rel: |
1221 | case ARM::BI_InterlockedOr64_rel: |
1222 | return MSVCIntrin::_InterlockedOr_rel; |
1223 | case ARM::BI_InterlockedOr8_nf: |
1224 | case ARM::BI_InterlockedOr16_nf: |
1225 | case ARM::BI_InterlockedOr_nf: |
1226 | case ARM::BI_InterlockedOr64_nf: |
1227 | return MSVCIntrin::_InterlockedOr_nf; |
1228 | case ARM::BI_InterlockedXor8_acq: |
1229 | case ARM::BI_InterlockedXor16_acq: |
1230 | case ARM::BI_InterlockedXor_acq: |
1231 | case ARM::BI_InterlockedXor64_acq: |
1232 | return MSVCIntrin::_InterlockedXor_acq; |
1233 | case ARM::BI_InterlockedXor8_rel: |
1234 | case ARM::BI_InterlockedXor16_rel: |
1235 | case ARM::BI_InterlockedXor_rel: |
1236 | case ARM::BI_InterlockedXor64_rel: |
1237 | return MSVCIntrin::_InterlockedXor_rel; |
1238 | case ARM::BI_InterlockedXor8_nf: |
1239 | case ARM::BI_InterlockedXor16_nf: |
1240 | case ARM::BI_InterlockedXor_nf: |
1241 | case ARM::BI_InterlockedXor64_nf: |
1242 | return MSVCIntrin::_InterlockedXor_nf; |
1243 | case ARM::BI_InterlockedAnd8_acq: |
1244 | case ARM::BI_InterlockedAnd16_acq: |
1245 | case ARM::BI_InterlockedAnd_acq: |
1246 | case ARM::BI_InterlockedAnd64_acq: |
1247 | return MSVCIntrin::_InterlockedAnd_acq; |
1248 | case ARM::BI_InterlockedAnd8_rel: |
1249 | case ARM::BI_InterlockedAnd16_rel: |
1250 | case ARM::BI_InterlockedAnd_rel: |
1251 | case ARM::BI_InterlockedAnd64_rel: |
1252 | return MSVCIntrin::_InterlockedAnd_rel; |
1253 | case ARM::BI_InterlockedAnd8_nf: |
1254 | case ARM::BI_InterlockedAnd16_nf: |
1255 | case ARM::BI_InterlockedAnd_nf: |
1256 | case ARM::BI_InterlockedAnd64_nf: |
1257 | return MSVCIntrin::_InterlockedAnd_nf; |
1258 | case ARM::BI_InterlockedIncrement16_acq: |
1259 | case ARM::BI_InterlockedIncrement_acq: |
1260 | case ARM::BI_InterlockedIncrement64_acq: |
1261 | return MSVCIntrin::_InterlockedIncrement_acq; |
1262 | case ARM::BI_InterlockedIncrement16_rel: |
1263 | case ARM::BI_InterlockedIncrement_rel: |
1264 | case ARM::BI_InterlockedIncrement64_rel: |
1265 | return MSVCIntrin::_InterlockedIncrement_rel; |
1266 | case ARM::BI_InterlockedIncrement16_nf: |
1267 | case ARM::BI_InterlockedIncrement_nf: |
1268 | case ARM::BI_InterlockedIncrement64_nf: |
1269 | return MSVCIntrin::_InterlockedIncrement_nf; |
1270 | case ARM::BI_InterlockedDecrement16_acq: |
1271 | case ARM::BI_InterlockedDecrement_acq: |
1272 | case ARM::BI_InterlockedDecrement64_acq: |
1273 | return MSVCIntrin::_InterlockedDecrement_acq; |
1274 | case ARM::BI_InterlockedDecrement16_rel: |
1275 | case ARM::BI_InterlockedDecrement_rel: |
1276 | case ARM::BI_InterlockedDecrement64_rel: |
1277 | return MSVCIntrin::_InterlockedDecrement_rel; |
1278 | case ARM::BI_InterlockedDecrement16_nf: |
1279 | case ARM::BI_InterlockedDecrement_nf: |
1280 | case ARM::BI_InterlockedDecrement64_nf: |
1281 | return MSVCIntrin::_InterlockedDecrement_nf; |
1282 | } |
1283 | llvm_unreachable("must return from switch"); |
1284 | } |
1285 | |
1286 | static Optional<CodeGenFunction::MSVCIntrin> |
1287 | translateAarch64ToMsvcIntrin(unsigned BuiltinID) { |
1288 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1289 | switch (BuiltinID) { |
1290 | default: |
1291 | return None; |
1292 | case AArch64::BI_BitScanForward: |
1293 | case AArch64::BI_BitScanForward64: |
1294 | return MSVCIntrin::_BitScanForward; |
1295 | case AArch64::BI_BitScanReverse: |
1296 | case AArch64::BI_BitScanReverse64: |
1297 | return MSVCIntrin::_BitScanReverse; |
1298 | case AArch64::BI_InterlockedAnd64: |
1299 | return MSVCIntrin::_InterlockedAnd; |
1300 | case AArch64::BI_InterlockedExchange64: |
1301 | return MSVCIntrin::_InterlockedExchange; |
1302 | case AArch64::BI_InterlockedExchangeAdd64: |
1303 | return MSVCIntrin::_InterlockedExchangeAdd; |
1304 | case AArch64::BI_InterlockedExchangeSub64: |
1305 | return MSVCIntrin::_InterlockedExchangeSub; |
1306 | case AArch64::BI_InterlockedOr64: |
1307 | return MSVCIntrin::_InterlockedOr; |
1308 | case AArch64::BI_InterlockedXor64: |
1309 | return MSVCIntrin::_InterlockedXor; |
1310 | case AArch64::BI_InterlockedDecrement64: |
1311 | return MSVCIntrin::_InterlockedDecrement; |
1312 | case AArch64::BI_InterlockedIncrement64: |
1313 | return MSVCIntrin::_InterlockedIncrement; |
1314 | case AArch64::BI_InterlockedExchangeAdd8_acq: |
1315 | case AArch64::BI_InterlockedExchangeAdd16_acq: |
1316 | case AArch64::BI_InterlockedExchangeAdd_acq: |
1317 | case AArch64::BI_InterlockedExchangeAdd64_acq: |
1318 | return MSVCIntrin::_InterlockedExchangeAdd_acq; |
1319 | case AArch64::BI_InterlockedExchangeAdd8_rel: |
1320 | case AArch64::BI_InterlockedExchangeAdd16_rel: |
1321 | case AArch64::BI_InterlockedExchangeAdd_rel: |
1322 | case AArch64::BI_InterlockedExchangeAdd64_rel: |
1323 | return MSVCIntrin::_InterlockedExchangeAdd_rel; |
1324 | case AArch64::BI_InterlockedExchangeAdd8_nf: |
1325 | case AArch64::BI_InterlockedExchangeAdd16_nf: |
1326 | case AArch64::BI_InterlockedExchangeAdd_nf: |
1327 | case AArch64::BI_InterlockedExchangeAdd64_nf: |
1328 | return MSVCIntrin::_InterlockedExchangeAdd_nf; |
1329 | case AArch64::BI_InterlockedExchange8_acq: |
1330 | case AArch64::BI_InterlockedExchange16_acq: |
1331 | case AArch64::BI_InterlockedExchange_acq: |
1332 | case AArch64::BI_InterlockedExchange64_acq: |
1333 | return MSVCIntrin::_InterlockedExchange_acq; |
1334 | case AArch64::BI_InterlockedExchange8_rel: |
1335 | case AArch64::BI_InterlockedExchange16_rel: |
1336 | case AArch64::BI_InterlockedExchange_rel: |
1337 | case AArch64::BI_InterlockedExchange64_rel: |
1338 | return MSVCIntrin::_InterlockedExchange_rel; |
1339 | case AArch64::BI_InterlockedExchange8_nf: |
1340 | case AArch64::BI_InterlockedExchange16_nf: |
1341 | case AArch64::BI_InterlockedExchange_nf: |
1342 | case AArch64::BI_InterlockedExchange64_nf: |
1343 | return MSVCIntrin::_InterlockedExchange_nf; |
1344 | case AArch64::BI_InterlockedCompareExchange8_acq: |
1345 | case AArch64::BI_InterlockedCompareExchange16_acq: |
1346 | case AArch64::BI_InterlockedCompareExchange_acq: |
1347 | case AArch64::BI_InterlockedCompareExchange64_acq: |
1348 | return MSVCIntrin::_InterlockedCompareExchange_acq; |
1349 | case AArch64::BI_InterlockedCompareExchange8_rel: |
1350 | case AArch64::BI_InterlockedCompareExchange16_rel: |
1351 | case AArch64::BI_InterlockedCompareExchange_rel: |
1352 | case AArch64::BI_InterlockedCompareExchange64_rel: |
1353 | return MSVCIntrin::_InterlockedCompareExchange_rel; |
1354 | case AArch64::BI_InterlockedCompareExchange8_nf: |
1355 | case AArch64::BI_InterlockedCompareExchange16_nf: |
1356 | case AArch64::BI_InterlockedCompareExchange_nf: |
1357 | case AArch64::BI_InterlockedCompareExchange64_nf: |
1358 | return MSVCIntrin::_InterlockedCompareExchange_nf; |
1359 | case AArch64::BI_InterlockedCompareExchange128: |
1360 | return MSVCIntrin::_InterlockedCompareExchange128; |
1361 | case AArch64::BI_InterlockedCompareExchange128_acq: |
1362 | return MSVCIntrin::_InterlockedCompareExchange128_acq; |
1363 | case AArch64::BI_InterlockedCompareExchange128_nf: |
1364 | return MSVCIntrin::_InterlockedCompareExchange128_nf; |
1365 | case AArch64::BI_InterlockedCompareExchange128_rel: |
1366 | return MSVCIntrin::_InterlockedCompareExchange128_rel; |
1367 | case AArch64::BI_InterlockedOr8_acq: |
1368 | case AArch64::BI_InterlockedOr16_acq: |
1369 | case AArch64::BI_InterlockedOr_acq: |
1370 | case AArch64::BI_InterlockedOr64_acq: |
1371 | return MSVCIntrin::_InterlockedOr_acq; |
1372 | case AArch64::BI_InterlockedOr8_rel: |
1373 | case AArch64::BI_InterlockedOr16_rel: |
1374 | case AArch64::BI_InterlockedOr_rel: |
1375 | case AArch64::BI_InterlockedOr64_rel: |
1376 | return MSVCIntrin::_InterlockedOr_rel; |
1377 | case AArch64::BI_InterlockedOr8_nf: |
1378 | case AArch64::BI_InterlockedOr16_nf: |
1379 | case AArch64::BI_InterlockedOr_nf: |
1380 | case AArch64::BI_InterlockedOr64_nf: |
1381 | return MSVCIntrin::_InterlockedOr_nf; |
1382 | case AArch64::BI_InterlockedXor8_acq: |
1383 | case AArch64::BI_InterlockedXor16_acq: |
1384 | case AArch64::BI_InterlockedXor_acq: |
1385 | case AArch64::BI_InterlockedXor64_acq: |
1386 | return MSVCIntrin::_InterlockedXor_acq; |
1387 | case AArch64::BI_InterlockedXor8_rel: |
1388 | case AArch64::BI_InterlockedXor16_rel: |
1389 | case AArch64::BI_InterlockedXor_rel: |
1390 | case AArch64::BI_InterlockedXor64_rel: |
1391 | return MSVCIntrin::_InterlockedXor_rel; |
1392 | case AArch64::BI_InterlockedXor8_nf: |
1393 | case AArch64::BI_InterlockedXor16_nf: |
1394 | case AArch64::BI_InterlockedXor_nf: |
1395 | case AArch64::BI_InterlockedXor64_nf: |
1396 | return MSVCIntrin::_InterlockedXor_nf; |
1397 | case AArch64::BI_InterlockedAnd8_acq: |
1398 | case AArch64::BI_InterlockedAnd16_acq: |
1399 | case AArch64::BI_InterlockedAnd_acq: |
1400 | case AArch64::BI_InterlockedAnd64_acq: |
1401 | return MSVCIntrin::_InterlockedAnd_acq; |
1402 | case AArch64::BI_InterlockedAnd8_rel: |
1403 | case AArch64::BI_InterlockedAnd16_rel: |
1404 | case AArch64::BI_InterlockedAnd_rel: |
1405 | case AArch64::BI_InterlockedAnd64_rel: |
1406 | return MSVCIntrin::_InterlockedAnd_rel; |
1407 | case AArch64::BI_InterlockedAnd8_nf: |
1408 | case AArch64::BI_InterlockedAnd16_nf: |
1409 | case AArch64::BI_InterlockedAnd_nf: |
1410 | case AArch64::BI_InterlockedAnd64_nf: |
1411 | return MSVCIntrin::_InterlockedAnd_nf; |
1412 | case AArch64::BI_InterlockedIncrement16_acq: |
1413 | case AArch64::BI_InterlockedIncrement_acq: |
1414 | case AArch64::BI_InterlockedIncrement64_acq: |
1415 | return MSVCIntrin::_InterlockedIncrement_acq; |
1416 | case AArch64::BI_InterlockedIncrement16_rel: |
1417 | case AArch64::BI_InterlockedIncrement_rel: |
1418 | case AArch64::BI_InterlockedIncrement64_rel: |
1419 | return MSVCIntrin::_InterlockedIncrement_rel; |
1420 | case AArch64::BI_InterlockedIncrement16_nf: |
1421 | case AArch64::BI_InterlockedIncrement_nf: |
1422 | case AArch64::BI_InterlockedIncrement64_nf: |
1423 | return MSVCIntrin::_InterlockedIncrement_nf; |
1424 | case AArch64::BI_InterlockedDecrement16_acq: |
1425 | case AArch64::BI_InterlockedDecrement_acq: |
1426 | case AArch64::BI_InterlockedDecrement64_acq: |
1427 | return MSVCIntrin::_InterlockedDecrement_acq; |
1428 | case AArch64::BI_InterlockedDecrement16_rel: |
1429 | case AArch64::BI_InterlockedDecrement_rel: |
1430 | case AArch64::BI_InterlockedDecrement64_rel: |
1431 | return MSVCIntrin::_InterlockedDecrement_rel; |
1432 | case AArch64::BI_InterlockedDecrement16_nf: |
1433 | case AArch64::BI_InterlockedDecrement_nf: |
1434 | case AArch64::BI_InterlockedDecrement64_nf: |
1435 | return MSVCIntrin::_InterlockedDecrement_nf; |
1436 | } |
1437 | llvm_unreachable("must return from switch"); |
1438 | } |
1439 | |
1440 | static Optional<CodeGenFunction::MSVCIntrin> |
1441 | translateX86ToMsvcIntrin(unsigned BuiltinID) { |
1442 | using MSVCIntrin = CodeGenFunction::MSVCIntrin; |
1443 | switch (BuiltinID) { |
1444 | default: |
1445 | return None; |
1446 | case clang::X86::BI_BitScanForward: |
1447 | case clang::X86::BI_BitScanForward64: |
1448 | return MSVCIntrin::_BitScanForward; |
1449 | case clang::X86::BI_BitScanReverse: |
1450 | case clang::X86::BI_BitScanReverse64: |
1451 | return MSVCIntrin::_BitScanReverse; |
1452 | case clang::X86::BI_InterlockedAnd64: |
1453 | return MSVCIntrin::_InterlockedAnd; |
1454 | case clang::X86::BI_InterlockedCompareExchange128: |
1455 | return MSVCIntrin::_InterlockedCompareExchange128; |
1456 | case clang::X86::BI_InterlockedExchange64: |
1457 | return MSVCIntrin::_InterlockedExchange; |
1458 | case clang::X86::BI_InterlockedExchangeAdd64: |
1459 | return MSVCIntrin::_InterlockedExchangeAdd; |
1460 | case clang::X86::BI_InterlockedExchangeSub64: |
1461 | return MSVCIntrin::_InterlockedExchangeSub; |
1462 | case clang::X86::BI_InterlockedOr64: |
1463 | return MSVCIntrin::_InterlockedOr; |
1464 | case clang::X86::BI_InterlockedXor64: |
1465 | return MSVCIntrin::_InterlockedXor; |
1466 | case clang::X86::BI_InterlockedDecrement64: |
1467 | return MSVCIntrin::_InterlockedDecrement; |
1468 | case clang::X86::BI_InterlockedIncrement64: |
1469 | return MSVCIntrin::_InterlockedIncrement; |
1470 | } |
1471 | llvm_unreachable("must return from switch"); |
1472 | } |
1473 | |
1474 | |
1475 | Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, |
1476 | const CallExpr *E) { |
1477 | switch (BuiltinID) { |
1478 | case MSVCIntrin::_BitScanForward: |
1479 | case MSVCIntrin::_BitScanReverse: { |
1480 | Address IndexAddress(EmitPointerWithAlignment(E->getArg(0))); |
1481 | Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
1482 | |
1483 | llvm::Type *ArgType = ArgValue->getType(); |
1484 | llvm::Type *IndexType = |
1485 | IndexAddress.getPointer()->getType()->getPointerElementType(); |
1486 | llvm::Type *ResultType = ConvertType(E->getType()); |
1487 | |
1488 | Value *ArgZero = llvm::Constant::getNullValue(ArgType); |
1489 | Value *ResZero = llvm::Constant::getNullValue(ResultType); |
1490 | Value *ResOne = llvm::ConstantInt::get(ResultType, 1); |
1491 | |
1492 | BasicBlock *Begin = Builder.GetInsertBlock(); |
1493 | BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); |
1494 | Builder.SetInsertPoint(End); |
1495 | PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result"); |
1496 | |
1497 | Builder.SetInsertPoint(Begin); |
1498 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero); |
1499 | BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); |
1500 | Builder.CreateCondBr(IsZero, End, NotZero); |
1501 | Result->addIncoming(ResZero, Begin); |
1502 | |
1503 | Builder.SetInsertPoint(NotZero); |
1504 | |
1505 | if (BuiltinID == MSVCIntrin::_BitScanForward) { |
1506 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
1507 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1508 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1509 | Builder.CreateStore(ZeroCount, IndexAddress, false); |
1510 | } else { |
1511 | unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); |
1512 | Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); |
1513 | |
1514 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
1515 | Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()}); |
1516 | ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false); |
1517 | Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount); |
1518 | Builder.CreateStore(Index, IndexAddress, false); |
1519 | } |
1520 | Builder.CreateBr(End); |
1521 | Result->addIncoming(ResOne, NotZero); |
1522 | |
1523 | Builder.SetInsertPoint(End); |
1524 | return Result; |
1525 | } |
1526 | case MSVCIntrin::_InterlockedAnd: |
1527 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E); |
1528 | case MSVCIntrin::_InterlockedExchange: |
1529 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E); |
1530 | case MSVCIntrin::_InterlockedExchangeAdd: |
1531 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E); |
1532 | case MSVCIntrin::_InterlockedExchangeSub: |
1533 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E); |
1534 | case MSVCIntrin::_InterlockedOr: |
1535 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E); |
1536 | case MSVCIntrin::_InterlockedXor: |
1537 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E); |
1538 | case MSVCIntrin::_InterlockedExchangeAdd_acq: |
1539 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1540 | AtomicOrdering::Acquire); |
1541 | case MSVCIntrin::_InterlockedExchangeAdd_rel: |
1542 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1543 | AtomicOrdering::Release); |
1544 | case MSVCIntrin::_InterlockedExchangeAdd_nf: |
1545 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
1546 | AtomicOrdering::Monotonic); |
1547 | case MSVCIntrin::_InterlockedExchange_acq: |
1548 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1549 | AtomicOrdering::Acquire); |
1550 | case MSVCIntrin::_InterlockedExchange_rel: |
1551 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1552 | AtomicOrdering::Release); |
1553 | case MSVCIntrin::_InterlockedExchange_nf: |
1554 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
1555 | AtomicOrdering::Monotonic); |
1556 | case MSVCIntrin::_InterlockedCompareExchange_acq: |
1557 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1558 | case MSVCIntrin::_InterlockedCompareExchange_rel: |
1559 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release); |
1560 | case MSVCIntrin::_InterlockedCompareExchange_nf: |
1561 | return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1562 | case MSVCIntrin::_InterlockedCompareExchange128: |
1563 | return EmitAtomicCmpXchg128ForMSIntrin( |
1564 | *this, E, AtomicOrdering::SequentiallyConsistent); |
1565 | case MSVCIntrin::_InterlockedCompareExchange128_acq: |
1566 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire); |
1567 | case MSVCIntrin::_InterlockedCompareExchange128_rel: |
1568 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release); |
1569 | case MSVCIntrin::_InterlockedCompareExchange128_nf: |
1570 | return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic); |
1571 | case MSVCIntrin::_InterlockedOr_acq: |
1572 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1573 | AtomicOrdering::Acquire); |
1574 | case MSVCIntrin::_InterlockedOr_rel: |
1575 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1576 | AtomicOrdering::Release); |
1577 | case MSVCIntrin::_InterlockedOr_nf: |
1578 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
1579 | AtomicOrdering::Monotonic); |
1580 | case MSVCIntrin::_InterlockedXor_acq: |
1581 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1582 | AtomicOrdering::Acquire); |
1583 | case MSVCIntrin::_InterlockedXor_rel: |
1584 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1585 | AtomicOrdering::Release); |
1586 | case MSVCIntrin::_InterlockedXor_nf: |
1587 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E, |
1588 | AtomicOrdering::Monotonic); |
1589 | case MSVCIntrin::_InterlockedAnd_acq: |
1590 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1591 | AtomicOrdering::Acquire); |
1592 | case MSVCIntrin::_InterlockedAnd_rel: |
1593 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1594 | AtomicOrdering::Release); |
1595 | case MSVCIntrin::_InterlockedAnd_nf: |
1596 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
1597 | AtomicOrdering::Monotonic); |
1598 | case MSVCIntrin::_InterlockedIncrement_acq: |
1599 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire); |
1600 | case MSVCIntrin::_InterlockedIncrement_rel: |
1601 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release); |
1602 | case MSVCIntrin::_InterlockedIncrement_nf: |
1603 | return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic); |
1604 | case MSVCIntrin::_InterlockedDecrement_acq: |
1605 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire); |
1606 | case MSVCIntrin::_InterlockedDecrement_rel: |
1607 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release); |
1608 | case MSVCIntrin::_InterlockedDecrement_nf: |
1609 | return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic); |
1610 | |
1611 | case MSVCIntrin::_InterlockedDecrement: |
1612 | return EmitAtomicDecrementValue(*this, E); |
1613 | case MSVCIntrin::_InterlockedIncrement: |
1614 | return EmitAtomicIncrementValue(*this, E); |
1615 | |
1616 | case MSVCIntrin::__fastfail: { |
1617 | |
1618 | |
1619 | |
1620 | llvm::Triple::ArchType ISA = getTarget().getTriple().getArch(); |
1621 | StringRef Asm, Constraints; |
1622 | switch (ISA) { |
1623 | default: |
1624 | ErrorUnsupported(E, "__fastfail call for this architecture"); |
1625 | break; |
1626 | case llvm::Triple::x86: |
1627 | case llvm::Triple::x86_64: |
1628 | Asm = "int $$0x29"; |
1629 | Constraints = "{cx}"; |
1630 | break; |
1631 | case llvm::Triple::thumb: |
1632 | Asm = "udf #251"; |
1633 | Constraints = "{r0}"; |
1634 | break; |
1635 | case llvm::Triple::aarch64: |
1636 | Asm = "brk #0xF003"; |
1637 | Constraints = "{w0}"; |
1638 | } |
1639 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false); |
1640 | llvm::InlineAsm *IA = |
1641 | llvm::InlineAsm::get(FTy, Asm, Constraints, true); |
1642 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
1643 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
1644 | llvm::Attribute::NoReturn); |
1645 | llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); |
1646 | CI->setAttributes(NoReturnAttr); |
1647 | return CI; |
1648 | } |
1649 | } |
1650 | llvm_unreachable("Incorrect MSVC intrinsic!"); |
1651 | } |
1652 | |
1653 | namespace { |
1654 | |
1655 | struct CallObjCArcUse final : EHScopeStack::Cleanup { |
1656 | CallObjCArcUse(llvm::Value *object) : object(object) {} |
1657 | llvm::Value *object; |
1658 | |
1659 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1660 | CGF.EmitARCIntrinsicUse(object); |
1661 | } |
1662 | }; |
1663 | } |
1664 | |
1665 | Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, |
1666 | BuiltinCheckKind Kind) { |
1667 | assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) |
1668 | && "Unsupported builtin check kind"); |
1669 | |
1670 | Value *ArgValue = EmitScalarExpr(E); |
1671 | if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) |
1672 | return ArgValue; |
1673 | |
1674 | SanitizerScope SanScope(this); |
1675 | Value *Cond = Builder.CreateICmpNE( |
1676 | ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); |
1677 | EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin), |
1678 | SanitizerHandler::InvalidBuiltin, |
1679 | {EmitCheckSourceLocation(E->getExprLoc()), |
1680 | llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)}, |
1681 | None); |
1682 | return ArgValue; |
1683 | } |
1684 | |
1685 | |
1686 | static CanQualType getOSLogArgType(ASTContext &C, int Size) { |
1687 | QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, false); |
1688 | return C.getCanonicalType(UnsignedTy); |
1689 | } |
1690 | |
1691 | llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction( |
1692 | const analyze_os_log::OSLogBufferLayout &Layout, |
1693 | CharUnits BufferAlignment) { |
1694 | ASTContext &Ctx = getContext(); |
1695 | |
1696 | llvm::SmallString<64> Name; |
1697 | { |
1698 | raw_svector_ostream OS(Name); |
1699 | OS << "__os_log_helper"; |
1700 | OS << "_" << BufferAlignment.getQuantity(); |
1701 | OS << "_" << int(Layout.getSummaryByte()); |
1702 | OS << "_" << int(Layout.getNumArgsByte()); |
1703 | for (const auto &Item : Layout.Items) |
1704 | OS << "_" << int(Item.getSizeByte()) << "_" |
1705 | << int(Item.getDescriptorByte()); |
1706 | } |
1707 | |
1708 | if (llvm::Function *F = CGM.getModule().getFunction(Name)) |
1709 | return F; |
1710 | |
1711 | llvm::SmallVector<QualType, 4> ArgTys; |
1712 | FunctionArgList Args; |
1713 | Args.push_back(ImplicitParamDecl::Create( |
1714 | Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy, |
1715 | ImplicitParamDecl::Other)); |
1716 | ArgTys.emplace_back(Ctx.VoidPtrTy); |
1717 | |
1718 | for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) { |
1719 | char Size = Layout.Items[I].getSizeByte(); |
1720 | if (!Size) |
1721 | continue; |
1722 | |
1723 | QualType ArgTy = getOSLogArgType(Ctx, Size); |
1724 | Args.push_back(ImplicitParamDecl::Create( |
1725 | Ctx, nullptr, SourceLocation(), |
1726 | &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy, |
1727 | ImplicitParamDecl::Other)); |
1728 | ArgTys.emplace_back(ArgTy); |
1729 | } |
1730 | |
1731 | QualType ReturnTy = Ctx.VoidTy; |
1732 | |
1733 | |
1734 | |
1735 | |
1736 | const CGFunctionInfo &FI = |
1737 | CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args); |
1738 | llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI); |
1739 | llvm::Function *Fn = llvm::Function::Create( |
1740 | FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule()); |
1741 | Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); |
1742 | CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, false); |
1743 | CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn); |
1744 | Fn->setDoesNotThrow(); |
1745 | |
1746 | |
1747 | if (CGM.getCodeGenOpts().OptimizeSize == 2) |
1748 | Fn->addFnAttr(llvm::Attribute::NoInline); |
1749 | |
1750 | auto NL = ApplyDebugLocation::CreateEmpty(*this); |
1751 | StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args); |
1752 | |
1753 | |
1754 | auto AL = ApplyDebugLocation::CreateArtificial(*this); |
1755 | |
1756 | CharUnits Offset; |
1757 | Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), |
1758 | BufferAlignment); |
1759 | Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()), |
1760 | Builder.CreateConstByteGEP(BufAddr, Offset++, "summary")); |
1761 | Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()), |
1762 | Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs")); |
1763 | |
1764 | unsigned I = 1; |
1765 | for (const auto &Item : Layout.Items) { |
1766 | Builder.CreateStore( |
1767 | Builder.getInt8(Item.getDescriptorByte()), |
1768 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor")); |
1769 | Builder.CreateStore( |
1770 | Builder.getInt8(Item.getSizeByte()), |
1771 | Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize")); |
1772 | |
1773 | CharUnits Size = Item.size(); |
1774 | if (!Size.getQuantity()) |
1775 | continue; |
1776 | |
1777 | Address Arg = GetAddrOfLocalVar(Args[I]); |
1778 | Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData"); |
1779 | Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(), |
1780 | "argDataCast"); |
1781 | Builder.CreateStore(Builder.CreateLoad(Arg), Addr); |
1782 | Offset += Size; |
1783 | ++I; |
1784 | } |
1785 | |
1786 | FinishFunction(); |
1787 | |
1788 | return Fn; |
1789 | } |
1790 | |
1791 | RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { |
1792 | assert(E.getNumArgs() >= 2 && |
1793 | "__builtin_os_log_format takes at least 2 arguments"); |
1794 | ASTContext &Ctx = getContext(); |
1795 | analyze_os_log::OSLogBufferLayout Layout; |
1796 | analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout); |
1797 | Address BufAddr = EmitPointerWithAlignment(E.getArg(0)); |
1798 | llvm::SmallVector<llvm::Value *, 4> RetainableOperands; |
1799 | |
1800 | |
1801 | CallArgList Args; |
1802 | Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy); |
1803 | |
1804 | for (const auto &Item : Layout.Items) { |
1805 | int Size = Item.getSizeByte(); |
1806 | if (!Size) |
1807 | continue; |
1808 | |
1809 | llvm::Value *ArgVal; |
1810 | |
1811 | if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) { |
1812 | uint64_t Val = 0; |
1813 | for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I) |
1814 | Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8; |
1815 | ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val)); |
1816 | } else if (const Expr *TheExpr = Item.getExpr()) { |
1817 | ArgVal = EmitScalarExpr(TheExpr, false); |
1818 | |
1819 | |
1820 | |
1821 | |
1822 | auto LifetimeExtendObject = [&](const Expr *E) { |
1823 | E = E->IgnoreParenCasts(); |
1824 | |
1825 | |
1826 | |
1827 | |
1828 | |
1829 | |
1830 | if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E)) |
1831 | return true; |
1832 | return false; |
1833 | }; |
1834 | |
1835 | if (TheExpr->getType()->isObjCRetainableType() && |
1836 | getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) { |
1837 | assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && |
1838 | "Only scalar can be a ObjC retainable type"); |
1839 | if (!isa<Constant>(ArgVal)) { |
1840 | CleanupKind Cleanup = getARCCleanupKind(); |
1841 | QualType Ty = TheExpr->getType(); |
1842 | Address Alloca = Address::invalid(); |
1843 | Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca); |
1844 | ArgVal = EmitARCRetain(Ty, ArgVal); |
1845 | Builder.CreateStore(ArgVal, Addr); |
1846 | pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty, |
1847 | CodeGenFunction::destroyARCStrongPrecise, |
1848 | Cleanup & EHCleanup); |
1849 | |
1850 | |
1851 | |
1852 | if (CGM.getCodeGenOpts().OptimizationLevel != 0) |
1853 | pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal); |
1854 | } |
1855 | } |
1856 | } else { |
1857 | ArgVal = Builder.getInt32(Item.getConstValue().getQuantity()); |
1858 | } |
1859 | |
1860 | unsigned ArgValSize = |
1861 | CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); |
1862 | llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), |
1863 | ArgValSize); |
1864 | ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); |
1865 | CanQualType ArgTy = getOSLogArgType(Ctx, Size); |
1866 | |
1867 | ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy)); |
1868 | Args.add(RValue::get(ArgVal), ArgTy); |
1869 | } |
1870 | |
1871 | const CGFunctionInfo &FI = |
1872 | CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args); |
1873 | llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction( |
1874 | Layout, BufAddr.getAlignment()); |
1875 | EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args); |
1876 | return RValue::get(BufAddr.getPointer()); |
1877 | } |
1878 | |
1879 | static bool isSpecialUnsignedMultiplySignedResult( |
1880 | unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, |
1881 | WidthAndSignedness ResultInfo) { |
1882 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1883 | Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width && |
1884 | !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed; |
1885 | } |
1886 | |
1887 | static RValue EmitCheckedUnsignedMultiplySignedResult( |
1888 | CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, |
1889 | const clang::Expr *Op2, WidthAndSignedness Op2Info, |
1890 | const clang::Expr *ResultArg, QualType ResultQTy, |
1891 | WidthAndSignedness ResultInfo) { |
1892 | assert(isSpecialUnsignedMultiplySignedResult( |
1893 | Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) && |
1894 | "Cannot specialize this multiply"); |
1895 | |
1896 | llvm::Value *V1 = CGF.EmitScalarExpr(Op1); |
1897 | llvm::Value *V2 = CGF.EmitScalarExpr(Op2); |
1898 | |
1899 | llvm::Value *HasOverflow; |
1900 | llvm::Value *Result = EmitOverflowIntrinsic( |
1901 | CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow); |
1902 | |
1903 | |
1904 | |
1905 | |
1906 | auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width); |
1907 | llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax); |
1908 | |
1909 | llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue); |
1910 | HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow); |
1911 | |
1912 | bool isVolatile = |
1913 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
1914 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1915 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
1916 | isVolatile); |
1917 | return RValue::get(HasOverflow); |
1918 | } |
1919 | |
1920 | |
1921 | static bool isSpecialMixedSignMultiply(unsigned BuiltinID, |
1922 | WidthAndSignedness Op1Info, |
1923 | WidthAndSignedness Op2Info, |
1924 | WidthAndSignedness ResultInfo) { |
1925 | return BuiltinID == Builtin::BI__builtin_mul_overflow && |
1926 | std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width && |
1927 | Op1Info.Signed != Op2Info.Signed; |
1928 | } |
1929 | |
1930 | |
1931 | |
1932 | static RValue |
1933 | EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, |
1934 | WidthAndSignedness Op1Info, const clang::Expr *Op2, |
1935 | WidthAndSignedness Op2Info, |
1936 | const clang::Expr *ResultArg, QualType ResultQTy, |
1937 | WidthAndSignedness ResultInfo) { |
1938 | assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info, |
1939 | Op2Info, ResultInfo) && |
1940 | "Not a mixed-sign multipliction we can specialize"); |
1941 | |
1942 | |
1943 | const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2; |
1944 | const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1; |
1945 | llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp); |
1946 | llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp); |
1947 | unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width; |
1948 | unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width; |
1949 | |
1950 | |
1951 | if (SignedOpWidth < UnsignedOpWidth) |
1952 | Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); |
1953 | if (UnsignedOpWidth < SignedOpWidth) |
1954 | Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); |
1955 | |
1956 | llvm::Type *OpTy = Signed->getType(); |
1957 | llvm::Value *Zero = llvm::Constant::getNullValue(OpTy); |
1958 | Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg); |
1959 | llvm::Type *ResTy = ResultPtr.getElementType(); |
1960 | unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width); |
1961 | |
1962 | |
1963 | llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero); |
1964 | llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed); |
1965 | llvm::Value *AbsSigned = |
1966 | CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed); |
1967 | |
1968 | |
1969 | llvm::Value *UnsignedOverflow; |
1970 | llvm::Value *UnsignedResult = |
1971 | EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned, |
1972 | Unsigned, UnsignedOverflow); |
1973 | |
1974 | llvm::Value *Overflow, *Result; |
1975 | if (ResultInfo.Signed) { |
1976 | |
1977 | |
1978 | auto IntMax = |
1979 | llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth); |
1980 | llvm::Value *MaxResult = |
1981 | CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax), |
1982 | CGF.Builder.CreateZExt(IsNegative, OpTy)); |
1983 | llvm::Value *SignedOverflow = |
1984 | CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult); |
1985 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow); |
1986 | |
1987 | |
1988 | llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult); |
1989 | llvm::Value *SignedResult = |
1990 | CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult); |
1991 | Result = CGF.Builder.CreateTrunc(SignedResult, ResTy); |
1992 | } else { |
1993 | |
1994 | llvm::Value *Underflow = CGF.Builder.CreateAnd( |
1995 | IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); |
1996 | Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); |
1997 | if (ResultInfo.Width < OpWidth) { |
1998 | auto IntMax = |
1999 | llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); |
2000 | llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( |
2001 | UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); |
2002 | Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); |
2003 | } |
2004 | |
2005 | |
2006 | Result = CGF.Builder.CreateSelect( |
2007 | IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult); |
2008 | |
2009 | Result = CGF.Builder.CreateTrunc(Result, ResTy); |
2010 | } |
2011 | assert(Overflow && Result && "Missing overflow or result"); |
2012 | |
2013 | bool isVolatile = |
2014 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
2015 | CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr, |
2016 | isVolatile); |
2017 | return RValue::get(Overflow); |
2018 | } |
2019 | |
2020 | static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType, |
2021 | Value *&RecordPtr, CharUnits Align, |
2022 | llvm::FunctionCallee Func, int Lvl) { |
2023 | ASTContext &Context = CGF.getContext(); |
2024 | RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition(); |
2025 | std::string Pad = std::string(Lvl * 4, ' '); |
2026 | |
2027 | Value *GString = |
2028 | CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n"); |
2029 | Value *Res = CGF.Builder.CreateCall(Func, {GString}); |
2030 | |
2031 | static llvm::DenseMap<QualType, const char *> Types; |
2032 | if (Types.empty()) { |
2033 | Types[Context.CharTy] = "%c"; |
2034 | Types[Context.BoolTy] = "%d"; |
2035 | Types[Context.SignedCharTy] = "%hhd"; |
2036 | Types[Context.UnsignedCharTy] = "%hhu"; |
2037 | Types[Context.IntTy] = "%d"; |
2038 | Types[Context.UnsignedIntTy] = "%u"; |
2039 | Types[Context.LongTy] = "%ld"; |
2040 | Types[Context.UnsignedLongTy] = "%lu"; |
2041 | Types[Context.LongLongTy] = "%lld"; |
2042 | Types[Context.UnsignedLongLongTy] = "%llu"; |
2043 | Types[Context.ShortTy] = "%hd"; |
2044 | Types[Context.UnsignedShortTy] = "%hu"; |
2045 | Types[Context.VoidPtrTy] = "%p"; |
2046 | Types[Context.FloatTy] = "%f"; |
2047 | Types[Context.DoubleTy] = "%f"; |
2048 | Types[Context.LongDoubleTy] = "%Lf"; |
2049 | Types[Context.getPointerType(Context.CharTy)] = "%s"; |
2050 | Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s"; |
2051 | } |
2052 | |
2053 | for (const auto *FD : RD->fields()) { |
2054 | Value *FieldPtr = RecordPtr; |
2055 | if (RD->isUnion()) |
2056 | FieldPtr = CGF.Builder.CreatePointerCast( |
2057 | FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType()))); |
2058 | else |
2059 | FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr, |
2060 | FD->getFieldIndex()); |
2061 | |
2062 | GString = CGF.Builder.CreateGlobalStringPtr( |
2063 | llvm::Twine(Pad) |
2064 | .concat(FD->getType().getAsString()) |
2065 | .concat(llvm::Twine(' ')) |
2066 | .concat(FD->getNameAsString()) |
2067 | .concat(" : ") |
2068 | .str()); |
2069 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2070 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2071 | |
2072 | QualType CanonicalType = |
2073 | FD->getType().getUnqualifiedType().getCanonicalType(); |
2074 | |
2075 | |
2076 | if (CanonicalType->isRecordType()) { |
2077 | TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1); |
2078 | Res = CGF.Builder.CreateAdd(TmpRes, Res); |
2079 | continue; |
2080 | } |
2081 | |
2082 | |
2083 | llvm::Twine Format = Types.find(CanonicalType) == Types.end() |
2084 | ? Types[Context.VoidPtrTy] |
2085 | : Types[CanonicalType]; |
2086 | |
2087 | Address FieldAddress = Address(FieldPtr, Align); |
2088 | FieldPtr = CGF.Builder.CreateLoad(FieldAddress); |
2089 | |
2090 | |
2091 | GString = CGF.Builder.CreateGlobalStringPtr( |
2092 | Format.concat(llvm::Twine('\n')).str()); |
2093 | TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr}); |
2094 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2095 | } |
2096 | |
2097 | GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n"); |
2098 | Value *TmpRes = CGF.Builder.CreateCall(Func, {GString}); |
2099 | Res = CGF.Builder.CreateAdd(Res, TmpRes); |
2100 | return Res; |
2101 | } |
2102 | |
2103 | static bool |
2104 | TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, |
2105 | llvm::SmallPtrSetImpl<const Decl *> &Seen) { |
2106 | if (const auto *Arr = Ctx.getAsArrayType(Ty)) |
2107 | Ty = Ctx.getBaseElementType(Arr); |
2108 | |
2109 | const auto *Record = Ty->getAsCXXRecordDecl(); |
2110 | if (!Record) |
2111 | return false; |
2112 | |
2113 | |
2114 | if (!Seen.insert(Record).second) |
2115 | return false; |
2116 | |
2117 | assert(Record->hasDefinition() && |
2118 | "Incomplete types should already be diagnosed"); |
2119 | |
2120 | if (Record->isDynamicClass()) |
2121 | return true; |
2122 | |
2123 | for (FieldDecl *F : Record->fields()) { |
2124 | if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) |
2125 | return true; |
2126 | } |
2127 | return false; |
2128 | } |
2129 | |
2130 | |
2131 | |
2132 | static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { |
2133 | if (!CGM.getCodeGenOpts().StrictVTablePointers) |
2134 | return false; |
2135 | llvm::SmallPtrSet<const Decl *, 16> Seen; |
2136 | return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen); |
2137 | } |
2138 | |
2139 | RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { |
2140 | llvm::Value *Src = EmitScalarExpr(E->getArg(0)); |
2141 | llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); |
2142 | |
2143 | |
2144 | |
2145 | llvm::Type *Ty = Src->getType(); |
2146 | ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false); |
2147 | |
2148 | |
2149 | unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; |
2150 | Function *F = CGM.getIntrinsic(IID, Ty); |
2151 | return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); |
2152 | } |
2153 | |
2154 | |
2155 | static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { |
2156 | switch (BuiltinID) { |
2157 | #define MUTATE_LDBL(func) \ |
2158 | case Builtin::BI__builtin_##func##l: \ |
2159 | return Builtin::BI__builtin_##func##f128; |
2160 | MUTATE_LDBL(sqrt) |
2161 | MUTATE_LDBL(cbrt) |
2162 | MUTATE_LDBL(fabs) |
2163 | MUTATE_LDBL(log) |
2164 | MUTATE_LDBL(log2) |
2165 | MUTATE_LDBL(log10) |
2166 | MUTATE_LDBL(log1p) |
2167 | MUTATE_LDBL(logb) |
2168 | MUTATE_LDBL(exp) |
2169 | MUTATE_LDBL(exp2) |
2170 | MUTATE_LDBL(expm1) |
2171 | MUTATE_LDBL(fdim) |
2172 | MUTATE_LDBL(hypot) |
2173 | MUTATE_LDBL(ilogb) |
2174 | MUTATE_LDBL(pow) |
2175 | MUTATE_LDBL(fmin) |
2176 | MUTATE_LDBL(fmax) |
2177 | MUTATE_LDBL(ceil) |
2178 | MUTATE_LDBL(trunc) |
2179 | MUTATE_LDBL(rint) |
2180 | MUTATE_LDBL(nearbyint) |
2181 | MUTATE_LDBL(round) |
2182 | MUTATE_LDBL(floor) |
2183 | MUTATE_LDBL(lround) |
2184 | MUTATE_LDBL(llround) |
2185 | MUTATE_LDBL(lrint) |
2186 | MUTATE_LDBL(llrint) |
2187 | MUTATE_LDBL(fmod) |
2188 | MUTATE_LDBL(modf) |
2189 | MUTATE_LDBL(nan) |
2190 | MUTATE_LDBL(nans) |
2191 | MUTATE_LDBL(inf) |
2192 | MUTATE_LDBL(fma) |
2193 | MUTATE_LDBL(sin) |
2194 | MUTATE_LDBL(cos) |
2195 | MUTATE_LDBL(tan) |
2196 | MUTATE_LDBL(sinh) |
2197 | MUTATE_LDBL(cosh) |
2198 | MUTATE_LDBL(tanh) |
2199 | MUTATE_LDBL(asin) |
2200 | MUTATE_LDBL(acos) |
2201 | MUTATE_LDBL(atan) |
2202 | MUTATE_LDBL(asinh) |
2203 | MUTATE_LDBL(acosh) |
2204 | MUTATE_LDBL(atanh) |
2205 | MUTATE_LDBL(atan2) |
2206 | MUTATE_LDBL(erf) |
2207 | MUTATE_LDBL(erfc) |
2208 | MUTATE_LDBL(ldexp) |
2209 | MUTATE_LDBL(frexp) |
2210 | MUTATE_LDBL(huge_val) |
2211 | MUTATE_LDBL(copysign) |
2212 | MUTATE_LDBL(nextafter) |
2213 | MUTATE_LDBL(nexttoward) |
2214 | MUTATE_LDBL(remainder) |
2215 | MUTATE_LDBL(remquo) |
2216 | MUTATE_LDBL(scalbln) |
2217 | MUTATE_LDBL(scalbn) |
2218 | MUTATE_LDBL(tgamma) |
2219 | MUTATE_LDBL(lgamma) |
2220 | #undef MUTATE_LDBL |
2221 | default: |
2222 | return BuiltinID; |
2223 | } |
2224 | } |
2225 | |
2226 | RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, |
2227 | const CallExpr *E, |
2228 | ReturnValueSlot ReturnValue) { |
2229 | const FunctionDecl *FD = GD.getDecl()->getAsFunction(); |
2230 | |
2231 | Expr::EvalResult Result; |
2232 | if (E->EvaluateAsRValue(Result, CGM.getContext()) && |
2233 | !Result.hasSideEffects()) { |
2234 | if (Result.Val.isInt()) |
2235 | return RValue::get(llvm::ConstantInt::get(getLLVMContext(), |
2236 | Result.Val.getInt())); |
2237 | if (Result.Val.isFloat()) |
2238 | return RValue::get(llvm::ConstantFP::get(getLLVMContext(), |
2239 | Result.Val.getFloat())); |
2240 | } |
2241 | |
2242 | |
2243 | |
2244 | |
2245 | |
2246 | if (getTarget().getTriple().isPPC64() && |
2247 | &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad()) |
2248 | BuiltinID = mutateLongDoubleBuiltin(BuiltinID); |
2249 | |
2250 | |
2251 | |
2252 | |
2253 | |
2254 | const unsigned BuiltinIDIfNoAsmLabel = |
2255 | FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID; |
2256 | |
2257 | |
2258 | |
2259 | |
2260 | |
2261 | |
2262 | if (FD->hasAttr<ConstAttr>()) { |
2263 | switch (BuiltinIDIfNoAsmLabel) { |
2264 | case Builtin::BIceil: |
2265 | case Builtin::BIceilf: |
2266 | case Builtin::BIceill: |
2267 | case Builtin::BI__builtin_ceil: |
2268 | case Builtin::BI__builtin_ceilf: |
2269 | case Builtin::BI__builtin_ceilf16: |
2270 | case Builtin::BI__builtin_ceill: |
2271 | case Builtin::BI__builtin_ceilf128: |
2272 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2273 | Intrinsic::ceil, |
2274 | Intrinsic::experimental_constrained_ceil)); |
2275 | |
2276 | case Builtin::BIcopysign: |
2277 | case Builtin::BIcopysignf: |
2278 | case Builtin::BIcopysignl: |
2279 | case Builtin::BI__builtin_copysign: |
2280 | case Builtin::BI__builtin_copysignf: |
2281 | case Builtin::BI__builtin_copysignf16: |
2282 | case Builtin::BI__builtin_copysignl: |
2283 | case Builtin::BI__builtin_copysignf128: |
2284 | return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign)); |
2285 | |
2286 | case Builtin::BIcos: |
2287 | case Builtin::BIcosf: |
2288 | case Builtin::BIcosl: |
2289 | case Builtin::BI__builtin_cos: |
2290 | case Builtin::BI__builtin_cosf: |
2291 | case Builtin::BI__builtin_cosf16: |
2292 | case Builtin::BI__builtin_cosl: |
2293 | case Builtin::BI__builtin_cosf128: |
2294 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2295 | Intrinsic::cos, |
2296 | Intrinsic::experimental_constrained_cos)); |
2297 | |
2298 | case Builtin::BIexp: |
2299 | case Builtin::BIexpf: |
2300 | case Builtin::BIexpl: |
2301 | case Builtin::BI__builtin_exp: |
2302 | case Builtin::BI__builtin_expf: |
2303 | case Builtin::BI__builtin_expf16: |
2304 | case Builtin::BI__builtin_expl: |
2305 | case Builtin::BI__builtin_expf128: |
2306 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2307 | Intrinsic::exp, |
2308 | Intrinsic::experimental_constrained_exp)); |
2309 | |
2310 | case Builtin::BIexp2: |
2311 | case Builtin::BIexp2f: |
2312 | case Builtin::BIexp2l: |
2313 | case Builtin::BI__builtin_exp2: |
2314 | case Builtin::BI__builtin_exp2f: |
2315 | case Builtin::BI__builtin_exp2f16: |
2316 | case Builtin::BI__builtin_exp2l: |
2317 | case Builtin::BI__builtin_exp2f128: |
2318 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2319 | Intrinsic::exp2, |
2320 | Intrinsic::experimental_constrained_exp2)); |
2321 | |
2322 | case Builtin::BIfabs: |
2323 | case Builtin::BIfabsf: |
2324 | case Builtin::BIfabsl: |
2325 | case Builtin::BI__builtin_fabs: |
2326 | case Builtin::BI__builtin_fabsf: |
2327 | case Builtin::BI__builtin_fabsf16: |
2328 | case Builtin::BI__builtin_fabsl: |
2329 | case Builtin::BI__builtin_fabsf128: |
2330 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs)); |
2331 | |
2332 | case Builtin::BIfloor: |
2333 | case Builtin::BIfloorf: |
2334 | case Builtin::BIfloorl: |
2335 | case Builtin::BI__builtin_floor: |
2336 | case Builtin::BI__builtin_floorf: |
2337 | case Builtin::BI__builtin_floorf16: |
2338 | case Builtin::BI__builtin_floorl: |
2339 | case Builtin::BI__builtin_floorf128: |
2340 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2341 | Intrinsic::floor, |
2342 | Intrinsic::experimental_constrained_floor)); |
2343 | |
2344 | case Builtin::BIfma: |
2345 | case Builtin::BIfmaf: |
2346 | case Builtin::BIfmal: |
2347 | case Builtin::BI__builtin_fma: |
2348 | case Builtin::BI__builtin_fmaf: |
2349 | case Builtin::BI__builtin_fmaf16: |
2350 | case Builtin::BI__builtin_fmal: |
2351 | case Builtin::BI__builtin_fmaf128: |
2352 | return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, |
2353 | Intrinsic::fma, |
2354 | Intrinsic::experimental_constrained_fma)); |
2355 | |
2356 | case Builtin::BIfmax: |
2357 | case Builtin::BIfmaxf: |
2358 | case Builtin::BIfmaxl: |
2359 | case Builtin::BI__builtin_fmax: |
2360 | case Builtin::BI__builtin_fmaxf: |
2361 | case Builtin::BI__builtin_fmaxf16: |
2362 | case Builtin::BI__builtin_fmaxl: |
2363 | case Builtin::BI__builtin_fmaxf128: |
2364 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2365 | Intrinsic::maxnum, |
2366 | Intrinsic::experimental_constrained_maxnum)); |
2367 | |
2368 | case Builtin::BIfmin: |
2369 | case Builtin::BIfminf: |
2370 | case Builtin::BIfminl: |
2371 | case Builtin::BI__builtin_fmin: |
2372 | case Builtin::BI__builtin_fminf: |
2373 | case Builtin::BI__builtin_fminf16: |
2374 | case Builtin::BI__builtin_fminl: |
2375 | case Builtin::BI__builtin_fminf128: |
2376 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2377 | Intrinsic::minnum, |
2378 | Intrinsic::experimental_constrained_minnum)); |
2379 | |
2380 | |
2381 | |
2382 | case Builtin::BIfmod: |
2383 | case Builtin::BIfmodf: |
2384 | case Builtin::BIfmodl: |
2385 | case Builtin::BI__builtin_fmod: |
2386 | case Builtin::BI__builtin_fmodf: |
2387 | case Builtin::BI__builtin_fmodf16: |
2388 | case Builtin::BI__builtin_fmodl: |
2389 | case Builtin::BI__builtin_fmodf128: { |
2390 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2391 | Value *Arg1 = EmitScalarExpr(E->getArg(0)); |
2392 | Value *Arg2 = EmitScalarExpr(E->getArg(1)); |
2393 | return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod")); |
2394 | } |
2395 | |
2396 | case Builtin::BIlog: |
2397 | case Builtin::BIlogf: |
2398 | case Builtin::BIlogl: |
2399 | case Builtin::BI__builtin_log: |
2400 | case Builtin::BI__builtin_logf: |
2401 | case Builtin::BI__builtin_logf16: |
2402 | case Builtin::BI__builtin_logl: |
2403 | case Builtin::BI__builtin_logf128: |
2404 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2405 | Intrinsic::log, |
2406 | Intrinsic::experimental_constrained_log)); |
2407 | |
2408 | case Builtin::BIlog10: |
2409 | case Builtin::BIlog10f: |
2410 | case Builtin::BIlog10l: |
2411 | case Builtin::BI__builtin_log10: |
2412 | case Builtin::BI__builtin_log10f: |
2413 | case Builtin::BI__builtin_log10f16: |
2414 | case Builtin::BI__builtin_log10l: |
2415 | case Builtin::BI__builtin_log10f128: |
2416 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2417 | Intrinsic::log10, |
2418 | Intrinsic::experimental_constrained_log10)); |
2419 | |
2420 | case Builtin::BIlog2: |
2421 | case Builtin::BIlog2f: |
2422 | case Builtin::BIlog2l: |
2423 | case Builtin::BI__builtin_log2: |
2424 | case Builtin::BI__builtin_log2f: |
2425 | case Builtin::BI__builtin_log2f16: |
2426 | case Builtin::BI__builtin_log2l: |
2427 | case Builtin::BI__builtin_log2f128: |
2428 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2429 | Intrinsic::log2, |
2430 | Intrinsic::experimental_constrained_log2)); |
2431 | |
2432 | case Builtin::BInearbyint: |
2433 | case Builtin::BInearbyintf: |
2434 | case Builtin::BInearbyintl: |
2435 | case Builtin::BI__builtin_nearbyint: |
2436 | case Builtin::BI__builtin_nearbyintf: |
2437 | case Builtin::BI__builtin_nearbyintl: |
2438 | case Builtin::BI__builtin_nearbyintf128: |
2439 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2440 | Intrinsic::nearbyint, |
2441 | Intrinsic::experimental_constrained_nearbyint)); |
2442 | |
2443 | case Builtin::BIpow: |
2444 | case Builtin::BIpowf: |
2445 | case Builtin::BIpowl: |
2446 | case Builtin::BI__builtin_pow: |
2447 | case Builtin::BI__builtin_powf: |
2448 | case Builtin::BI__builtin_powf16: |
2449 | case Builtin::BI__builtin_powl: |
2450 | case Builtin::BI__builtin_powf128: |
2451 | return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, |
2452 | Intrinsic::pow, |
2453 | Intrinsic::experimental_constrained_pow)); |
2454 | |
2455 | case Builtin::BIrint: |
2456 | case Builtin::BIrintf: |
2457 | case Builtin::BIrintl: |
2458 | case Builtin::BI__builtin_rint: |
2459 | case Builtin::BI__builtin_rintf: |
2460 | case Builtin::BI__builtin_rintf16: |
2461 | case Builtin::BI__builtin_rintl: |
2462 | case Builtin::BI__builtin_rintf128: |
2463 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2464 | Intrinsic::rint, |
2465 | Intrinsic::experimental_constrained_rint)); |
2466 | |
2467 | case Builtin::BIround: |
2468 | case Builtin::BIroundf: |
2469 | case Builtin::BIroundl: |
2470 | case Builtin::BI__builtin_round: |
2471 | case Builtin::BI__builtin_roundf: |
2472 | case Builtin::BI__builtin_roundf16: |
2473 | case Builtin::BI__builtin_roundl: |
2474 | case Builtin::BI__builtin_roundf128: |
2475 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2476 | Intrinsic::round, |
2477 | Intrinsic::experimental_constrained_round)); |
2478 | |
2479 | case Builtin::BIsin: |
2480 | case Builtin::BIsinf: |
2481 | case Builtin::BIsinl: |
2482 | case Builtin::BI__builtin_sin: |
2483 | case Builtin::BI__builtin_sinf: |
2484 | case Builtin::BI__builtin_sinf16: |
2485 | case Builtin::BI__builtin_sinl: |
2486 | case Builtin::BI__builtin_sinf128: |
2487 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2488 | Intrinsic::sin, |
2489 | Intrinsic::experimental_constrained_sin)); |
2490 | |
2491 | case Builtin::BIsqrt: |
2492 | case Builtin::BIsqrtf: |
2493 | case Builtin::BIsqrtl: |
2494 | case Builtin::BI__builtin_sqrt: |
2495 | case Builtin::BI__builtin_sqrtf: |
2496 | case Builtin::BI__builtin_sqrtf16: |
2497 | case Builtin::BI__builtin_sqrtl: |
2498 | case Builtin::BI__builtin_sqrtf128: |
2499 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2500 | Intrinsic::sqrt, |
2501 | Intrinsic::experimental_constrained_sqrt)); |
2502 | |
2503 | case Builtin::BItrunc: |
2504 | case Builtin::BItruncf: |
2505 | case Builtin::BItruncl: |
2506 | case Builtin::BI__builtin_trunc: |
2507 | case Builtin::BI__builtin_truncf: |
2508 | case Builtin::BI__builtin_truncf16: |
2509 | case Builtin::BI__builtin_truncl: |
2510 | case Builtin::BI__builtin_truncf128: |
2511 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, |
2512 | Intrinsic::trunc, |
2513 | Intrinsic::experimental_constrained_trunc)); |
2514 | |
2515 | case Builtin::BIlround: |
2516 | case Builtin::BIlroundf: |
2517 | case Builtin::BIlroundl: |
2518 | case Builtin::BI__builtin_lround: |
2519 | case Builtin::BI__builtin_lroundf: |
2520 | case Builtin::BI__builtin_lroundl: |
2521 | case Builtin::BI__builtin_lroundf128: |
2522 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2523 | *this, E, Intrinsic::lround, |
2524 | Intrinsic::experimental_constrained_lround)); |
2525 | |
2526 | case Builtin::BIllround: |
2527 | case Builtin::BIllroundf: |
2528 | case Builtin::BIllroundl: |
2529 | case Builtin::BI__builtin_llround: |
2530 | case Builtin::BI__builtin_llroundf: |
2531 | case Builtin::BI__builtin_llroundl: |
2532 | case Builtin::BI__builtin_llroundf128: |
2533 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2534 | *this, E, Intrinsic::llround, |
2535 | Intrinsic::experimental_constrained_llround)); |
2536 | |
2537 | case Builtin::BIlrint: |
2538 | case Builtin::BIlrintf: |
2539 | case Builtin::BIlrintl: |
2540 | case Builtin::BI__builtin_lrint: |
2541 | case Builtin::BI__builtin_lrintf: |
2542 | case Builtin::BI__builtin_lrintl: |
2543 | case Builtin::BI__builtin_lrintf128: |
2544 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2545 | *this, E, Intrinsic::lrint, |
2546 | Intrinsic::experimental_constrained_lrint)); |
2547 | |
2548 | case Builtin::BIllrint: |
2549 | case Builtin::BIllrintf: |
2550 | case Builtin::BIllrintl: |
2551 | case Builtin::BI__builtin_llrint: |
2552 | case Builtin::BI__builtin_llrintf: |
2553 | case Builtin::BI__builtin_llrintl: |
2554 | case Builtin::BI__builtin_llrintf128: |
2555 | return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin( |
2556 | *this, E, Intrinsic::llrint, |
2557 | Intrinsic::experimental_constrained_llrint)); |
2558 | |
2559 | default: |
2560 | break; |
2561 | } |
2562 | } |
2563 | |
2564 | switch (BuiltinIDIfNoAsmLabel) { |
2565 | default: break; |
2566 | case Builtin::BI__builtin___CFStringMakeConstantString: |
2567 | case Builtin::BI__builtin___NSStringMakeConstantString: |
2568 | return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); |
2569 | case Builtin::BI__builtin_stdarg_start: |
2570 | case Builtin::BI__builtin_va_start: |
2571 | case Builtin::BI__va_start: |
2572 | case Builtin::BI__builtin_va_end: |
2573 | return RValue::get( |
2574 | EmitVAStartEnd(BuiltinID == Builtin::BI__va_start |
2575 | ? EmitScalarExpr(E->getArg(0)) |
2576 | : EmitVAListRef(E->getArg(0)).getPointer(), |
2577 | BuiltinID != Builtin::BI__builtin_va_end)); |
2578 | case Builtin::BI__builtin_va_copy: { |
2579 | Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer(); |
2580 | Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer(); |
2581 | |
2582 | llvm::Type *Type = Int8PtrTy; |
2583 | |
2584 | DstPtr = Builder.CreateBitCast(DstPtr, Type); |
2585 | SrcPtr = Builder.CreateBitCast(SrcPtr, Type); |
2586 | return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), |
2587 | {DstPtr, SrcPtr})); |
2588 | } |
2589 | case Builtin::BI__builtin_abs: |
2590 | case Builtin::BI__builtin_labs: |
2591 | case Builtin::BI__builtin_llabs: { |
2592 | |
2593 | |
2594 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2595 | Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg"); |
2596 | Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType()); |
2597 | Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond"); |
2598 | Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs"); |
2599 | return RValue::get(Result); |
2600 | } |
2601 | case Builtin::BI__builtin_complex: { |
2602 | Value *Real = EmitScalarExpr(E->getArg(0)); |
2603 | Value *Imag = EmitScalarExpr(E->getArg(1)); |
2604 | return RValue::getComplex({Real, Imag}); |
2605 | } |
2606 | case Builtin::BI__builtin_conj: |
2607 | case Builtin::BI__builtin_conjf: |
2608 | case Builtin::BI__builtin_conjl: |
2609 | case Builtin::BIconj: |
2610 | case Builtin::BIconjf: |
2611 | case Builtin::BIconjl: { |
2612 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2613 | Value *Real = ComplexVal.first; |
2614 | Value *Imag = ComplexVal.second; |
2615 | Imag = Builder.CreateFNeg(Imag, "neg"); |
2616 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2617 | } |
2618 | case Builtin::BI__builtin_creal: |
2619 | case Builtin::BI__builtin_crealf: |
2620 | case Builtin::BI__builtin_creall: |
2621 | case Builtin::BIcreal: |
2622 | case Builtin::BIcrealf: |
2623 | case Builtin::BIcreall: { |
2624 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2625 | return RValue::get(ComplexVal.first); |
2626 | } |
2627 | |
2628 | case Builtin::BI__builtin_dump_struct: { |
2629 | llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy); |
2630 | llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get( |
2631 | LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true); |
2632 | |
2633 | Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts()); |
2634 | CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment(); |
2635 | |
2636 | const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts(); |
2637 | QualType Arg0Type = Arg0->getType()->getPointeeType(); |
2638 | |
2639 | Value *RecordPtr = EmitScalarExpr(Arg0); |
2640 | Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, |
2641 | {LLVMFuncType, Func}, 0); |
2642 | return RValue::get(Res); |
2643 | } |
2644 | |
2645 | case Builtin::BI__builtin_preserve_access_index: { |
2646 | |
2647 | |
2648 | |
2649 | if (!getDebugInfo()) { |
2650 | CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); |
2651 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2652 | } |
2653 | |
2654 | |
2655 | if (IsInPreservedAIRegion) { |
2656 | CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); |
2657 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2658 | } |
2659 | |
2660 | IsInPreservedAIRegion = true; |
2661 | Value *Res = EmitScalarExpr(E->getArg(0)); |
2662 | IsInPreservedAIRegion = false; |
2663 | return RValue::get(Res); |
2664 | } |
2665 | |
2666 | case Builtin::BI__builtin_cimag: |
2667 | case Builtin::BI__builtin_cimagf: |
2668 | case Builtin::BI__builtin_cimagl: |
2669 | case Builtin::BIcimag: |
2670 | case Builtin::BIcimagf: |
2671 | case Builtin::BIcimagl: { |
2672 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2673 | return RValue::get(ComplexVal.second); |
2674 | } |
2675 | |
2676 | case Builtin::BI__builtin_clrsb: |
2677 | case Builtin::BI__builtin_clrsbl: |
2678 | case Builtin::BI__builtin_clrsbll: { |
2679 | |
2680 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2681 | |
2682 | llvm::Type *ArgType = ArgValue->getType(); |
2683 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2684 | |
2685 | llvm::Type *ResultType = ConvertType(E->getType()); |
2686 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2687 | Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg"); |
2688 | Value *Inverse = Builder.CreateNot(ArgValue, "not"); |
2689 | Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); |
2690 | Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); |
2691 | Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); |
2692 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2693 | "cast"); |
2694 | return RValue::get(Result); |
2695 | } |
2696 | case Builtin::BI__builtin_ctzs: |
2697 | case Builtin::BI__builtin_ctz: |
2698 | case Builtin::BI__builtin_ctzl: |
2699 | case Builtin::BI__builtin_ctzll: { |
2700 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); |
2701 | |
2702 | llvm::Type *ArgType = ArgValue->getType(); |
2703 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2704 | |
2705 | llvm::Type *ResultType = ConvertType(E->getType()); |
2706 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2707 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2708 | if (Result->getType() != ResultType) |
2709 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2710 | "cast"); |
2711 | return RValue::get(Result); |
2712 | } |
2713 | case Builtin::BI__builtin_clzs: |
2714 | case Builtin::BI__builtin_clz: |
2715 | case Builtin::BI__builtin_clzl: |
2716 | case Builtin::BI__builtin_clzll: { |
2717 | Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); |
2718 | |
2719 | llvm::Type *ArgType = ArgValue->getType(); |
2720 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2721 | |
2722 | llvm::Type *ResultType = ConvertType(E->getType()); |
2723 | Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); |
2724 | Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); |
2725 | if (Result->getType() != ResultType) |
2726 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2727 | "cast"); |
2728 | return RValue::get(Result); |
2729 | } |
2730 | case Builtin::BI__builtin_ffs: |
2731 | case Builtin::BI__builtin_ffsl: |
2732 | case Builtin::BI__builtin_ffsll: { |
2733 | |
2734 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2735 | |
2736 | llvm::Type *ArgType = ArgValue->getType(); |
2737 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); |
2738 | |
2739 | llvm::Type *ResultType = ConvertType(E->getType()); |
2740 | Value *Tmp = |
2741 | Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), |
2742 | llvm::ConstantInt::get(ArgType, 1)); |
2743 | Value *Zero = llvm::Constant::getNullValue(ArgType); |
2744 | Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); |
2745 | Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); |
2746 | if (Result->getType() != ResultType) |
2747 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2748 | "cast"); |
2749 | return RValue::get(Result); |
2750 | } |
2751 | case Builtin::BI__builtin_parity: |
2752 | case Builtin::BI__builtin_parityl: |
2753 | case Builtin::BI__builtin_parityll: { |
2754 | |
2755 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2756 | |
2757 | llvm::Type *ArgType = ArgValue->getType(); |
2758 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2759 | |
2760 | llvm::Type *ResultType = ConvertType(E->getType()); |
2761 | Value *Tmp = Builder.CreateCall(F, ArgValue); |
2762 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
2763 | if (Result->getType() != ResultType) |
2764 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2765 | "cast"); |
2766 | return RValue::get(Result); |
2767 | } |
2768 | case Builtin::BI__lzcnt16: |
2769 | case Builtin::BI__lzcnt: |
2770 | case Builtin::BI__lzcnt64: { |
2771 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2772 | |
2773 | llvm::Type *ArgType = ArgValue->getType(); |
2774 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType); |
2775 | |
2776 | llvm::Type *ResultType = ConvertType(E->getType()); |
2777 | Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); |
2778 | if (Result->getType() != ResultType) |
2779 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2780 | "cast"); |
2781 | return RValue::get(Result); |
2782 | } |
2783 | case Builtin::BI__popcnt16: |
2784 | case Builtin::BI__popcnt: |
2785 | case Builtin::BI__popcnt64: |
2786 | case Builtin::BI__builtin_popcount: |
2787 | case Builtin::BI__builtin_popcountl: |
2788 | case Builtin::BI__builtin_popcountll: { |
2789 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2790 | |
2791 | llvm::Type *ArgType = ArgValue->getType(); |
2792 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
2793 | |
2794 | llvm::Type *ResultType = ConvertType(E->getType()); |
2795 | Value *Result = Builder.CreateCall(F, ArgValue); |
2796 | if (Result->getType() != ResultType) |
2797 | Result = Builder.CreateIntCast(Result, ResultType, true, |
2798 | "cast"); |
2799 | return RValue::get(Result); |
2800 | } |
2801 | case Builtin::BI__builtin_unpredictable: { |
2802 | |
2803 | |
2804 | |
2805 | return RValue::get(EmitScalarExpr(E->getArg(0))); |
2806 | } |
2807 | case Builtin::BI__builtin_expect: { |
2808 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2809 | llvm::Type *ArgType = ArgValue->getType(); |
2810 | |
2811 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2812 | |
2813 | |
2814 | |
2815 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2816 | return RValue::get(ArgValue); |
2817 | |
2818 | Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); |
2819 | Value *Result = |
2820 | Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); |
2821 | return RValue::get(Result); |
2822 | } |
2823 | case Builtin::BI__builtin_expect_with_probability: { |
2824 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2825 | llvm::Type *ArgType = ArgValue->getType(); |
2826 | |
2827 | Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); |
2828 | llvm::APFloat Probability(0.0); |
2829 | const Expr *ProbArg = E->getArg(2); |
2830 | bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext()); |
2831 | assert(EvalSucceed && "probability should be able to evaluate as float"); |
2832 | (void)EvalSucceed; |
2833 | bool LoseInfo = false; |
2834 | Probability.convert(llvm::APFloat::IEEEdouble(), |
2835 | llvm::RoundingMode::Dynamic, &LoseInfo); |
2836 | llvm::Type *Ty = ConvertType(ProbArg->getType()); |
2837 | Constant *Confidence = ConstantFP::get(Ty, Probability); |
2838 | |
2839 | |
2840 | |
2841 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
2842 | return RValue::get(ArgValue); |
2843 | |
2844 | Function *FnExpect = |
2845 | CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType); |
2846 | Value *Result = Builder.CreateCall( |
2847 | FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval"); |
2848 | return RValue::get(Result); |
2849 | } |
2850 | case Builtin::BI__builtin_assume_aligned: { |
2851 | const Expr *Ptr = E->getArg(0); |
2852 | Value *PtrValue = EmitScalarExpr(Ptr); |
2853 | Value *OffsetValue = |
2854 | (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; |
2855 | |
2856 | Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); |
2857 | ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); |
2858 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
2859 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
2860 | llvm::Value::MaximumAlignment); |
2861 | |
2862 | emitAlignmentAssumption(PtrValue, Ptr, |
2863 | SourceLocation(), |
2864 | AlignmentCI, OffsetValue); |
2865 | return RValue::get(PtrValue); |
2866 | } |
2867 | case Builtin::BI__assume: |
2868 | case Builtin::BI__builtin_assume: { |
2869 | if (E->getArg(0)->HasSideEffects(getContext())) |
2870 | return RValue::get(nullptr); |
2871 | |
2872 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2873 | Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume); |
2874 | return RValue::get(Builder.CreateCall(FnAssume, ArgValue)); |
2875 | } |
2876 | case Builtin::BI__arithmetic_fence: { |
2877 | |
2878 | |
2879 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
2880 | llvm::FastMathFlags FMF = Builder.getFastMathFlags(); |
2881 | bool isArithmeticFenceEnabled = |
2882 | FMF.allowReassoc() && |
2883 | getContext().getTargetInfo().checkArithmeticFenceSupported(); |
2884 | QualType ArgType = E->getArg(0)->getType(); |
2885 | if (ArgType->isComplexType()) { |
2886 | if (isArithmeticFenceEnabled) { |
2887 | QualType ElementType = ArgType->castAs<ComplexType>()->getElementType(); |
2888 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2889 | Value *Real = Builder.CreateArithmeticFence(ComplexVal.first, |
2890 | ConvertType(ElementType)); |
2891 | Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second, |
2892 | ConvertType(ElementType)); |
2893 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2894 | } |
2895 | ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); |
2896 | Value *Real = ComplexVal.first; |
2897 | Value *Imag = ComplexVal.second; |
2898 | return RValue::getComplex(std::make_pair(Real, Imag)); |
2899 | } |
2900 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
2901 | if (isArithmeticFenceEnabled) |
2902 | return RValue::get( |
2903 | Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType))); |
2904 | return RValue::get(ArgValue); |
2905 | } |
2906 | case Builtin::BI__builtin_bswap16: |
2907 | case Builtin::BI__builtin_bswap32: |
2908 | case Builtin::BI__builtin_bswap64: { |
2909 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap)); |
2910 | } |
2911 | case Builtin::BI__builtin_bitreverse8: |
2912 | case Builtin::BI__builtin_bitreverse16: |
2913 | case Builtin::BI__builtin_bitreverse32: |
2914 | case Builtin::BI__builtin_bitreverse64: { |
2915 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse)); |
2916 | } |
2917 | case Builtin::BI__builtin_rotateleft8: |
2918 | case Builtin::BI__builtin_rotateleft16: |
2919 | case Builtin::BI__builtin_rotateleft32: |
2920 | case Builtin::BI__builtin_rotateleft64: |
2921 | case Builtin::BI_rotl8: |
2922 | case Builtin::BI_rotl16: |
2923 | case Builtin::BI_rotl: |
2924 | case Builtin::BI_lrotl: |
2925 | case Builtin::BI_rotl64: |
2926 | return emitRotate(E, false); |
2927 | |
2928 | case Builtin::BI__builtin_rotateright8: |
2929 | case Builtin::BI__builtin_rotateright16: |
2930 | case Builtin::BI__builtin_rotateright32: |
2931 | case Builtin::BI__builtin_rotateright64: |
2932 | case Builtin::BI_rotr8: |
2933 | case Builtin::BI_rotr16: |
2934 | case Builtin::BI_rotr: |
2935 | case Builtin::BI_lrotr: |
2936 | case Builtin::BI_rotr64: |
2937 | return emitRotate(E, true); |
2938 | |
2939 | case Builtin::BI__builtin_constant_p: { |
2940 | llvm::Type *ResultType = ConvertType(E->getType()); |
2941 | |
2942 | const Expr *Arg = E->getArg(0); |
2943 | QualType ArgType = Arg->getType(); |
2944 | |
2945 | |
2946 | if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && |
2947 | !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) |
2948 | |
2949 | |
2950 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2951 | |
2952 | if (Arg->HasSideEffects(getContext())) |
2953 | |
2954 | |
2955 | return RValue::get(ConstantInt::get(ResultType, 0)); |
2956 | |
2957 | Value *ArgValue = EmitScalarExpr(Arg); |
2958 | if (ArgType->isObjCObjectPointerType()) { |
2959 | |
2960 | |
2961 | ArgType = CGM.getContext().getObjCIdType(); |
2962 | ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType)); |
2963 | } |
2964 | Function *F = |
2965 | CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); |
2966 | Value *Result = Builder.CreateCall(F, ArgValue); |
2967 | if (Result->getType() != ResultType) |
2968 | Result = Builder.CreateIntCast(Result, ResultType, false); |
2969 | return RValue::get(Result); |
2970 | } |
2971 | case Builtin::BI__builtin_dynamic_object_size: |
2972 | case Builtin::BI__builtin_object_size: { |
2973 | unsigned Type = |
2974 | E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
2975 | auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); |
2976 | |
2977 | |
2978 | |
2979 | bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size; |
2980 | return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, |
2981 | nullptr, IsDynamic)); |
2982 | } |
2983 | case Builtin::BI__builtin_prefetch: { |
2984 | Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); |
2985 | |
2986 | RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : |
2987 | llvm::ConstantInt::get(Int32Ty, 0); |
2988 | Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : |
2989 | llvm::ConstantInt::get(Int32Ty, 3); |
2990 | Value *Data = llvm::ConstantInt::get(Int32Ty, 1); |
2991 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
2992 | return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); |
2993 | } |
2994 | case Builtin::BI__builtin_readcyclecounter: { |
2995 | Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); |
2996 | return RValue::get(Builder.CreateCall(F)); |
2997 | } |
2998 | case Builtin::BI__builtin___clear_cache: { |
2999 | Value *Begin = EmitScalarExpr(E->getArg(0)); |
3000 | Value *End = EmitScalarExpr(E->getArg(1)); |
3001 | Function *F = CGM.getIntrinsic(Intrinsic::clear_cache); |
3002 | return RValue::get(Builder.CreateCall(F, {Begin, End})); |
3003 | } |
3004 | case Builtin::BI__builtin_trap: |
3005 | return RValue::get(EmitTrapCall(Intrinsic::trap)); |
3006 | case Builtin::BI__debugbreak: |
3007 | return RValue::get(EmitTrapCall(Intrinsic::debugtrap)); |
3008 | case Builtin::BI__builtin_unreachable: { |
3009 | EmitUnreachable(E->getExprLoc()); |
3010 | |
3011 | |
3012 | EmitBlock(createBasicBlock("unreachable.cont")); |
3013 | |
3014 | return RValue::get(nullptr); |
3015 | } |
3016 | |
3017 | case Builtin::BI__builtin_powi: |
3018 | case Builtin::BI__builtin_powif: |
3019 | case Builtin::BI__builtin_powil: { |
3020 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
3021 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
3022 | |
3023 | if (Builder.getIsFPConstrained()) { |
3024 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3025 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi, |
3026 | Src0->getType()); |
3027 | return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 })); |
3028 | } |
3029 | |
3030 | Function *F = CGM.getIntrinsic(Intrinsic::powi, |
3031 | { Src0->getType(), Src1->getType() }); |
3032 | return RValue::get(Builder.CreateCall(F, { Src0, Src1 })); |
3033 | } |
3034 | case Builtin::BI__builtin_isgreater: |
3035 | case Builtin::BI__builtin_isgreaterequal: |
3036 | case Builtin::BI__builtin_isless: |
3037 | case Builtin::BI__builtin_islessequal: |
3038 | case Builtin::BI__builtin_islessgreater: |
3039 | case Builtin::BI__builtin_isunordered: { |
3040 | |
3041 | |
3042 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3043 | |
3044 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
3045 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
3046 | |
3047 | switch (BuiltinID) { |
3048 | default: llvm_unreachable("Unknown ordered comparison"); |
3049 | case Builtin::BI__builtin_isgreater: |
3050 | LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); |
3051 | break; |
3052 | case Builtin::BI__builtin_isgreaterequal: |
3053 | LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); |
3054 | break; |
3055 | case Builtin::BI__builtin_isless: |
3056 | LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); |
3057 | break; |
3058 | case Builtin::BI__builtin_islessequal: |
3059 | LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); |
3060 | break; |
3061 | case Builtin::BI__builtin_islessgreater: |
3062 | LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); |
3063 | break; |
3064 | case Builtin::BI__builtin_isunordered: |
3065 | LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); |
3066 | break; |
3067 | } |
3068 | |
3069 | return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); |
3070 | } |
3071 | case Builtin::BI__builtin_isnan: { |
3072 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3073 | Value *V = EmitScalarExpr(E->getArg(0)); |
3074 | llvm::Type *Ty = V->getType(); |
3075 | const llvm::fltSemantics &Semantics = Ty->getFltSemantics(); |
3076 | if (!Builder.getIsFPConstrained() || |
3077 | Builder.getDefaultConstrainedExcept() == fp::ebIgnore || |
3078 | !Ty->isIEEE()) { |
3079 | V = Builder.CreateFCmpUNO(V, V, "cmp"); |
3080 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3081 | } |
3082 | |
3083 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3084 | return RValue::get(Result); |
3085 | |
3086 | |
3087 | |
3088 | unsigned bitsize = Ty->getScalarSizeInBits(); |
3089 | llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize); |
3090 | Value *IntV = Builder.CreateBitCast(V, IntTy); |
3091 | APInt AndMask = APInt::getSignedMaxValue(bitsize); |
3092 | Value *AbsV = |
3093 | Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask)); |
3094 | APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt(); |
3095 | Value *Sub = |
3096 | Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV); |
3097 | |
3098 | V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1)); |
3099 | if (bitsize > 32) |
3100 | V = Builder.CreateTrunc(V, ConvertType(E->getType())); |
3101 | return RValue::get(V); |
3102 | } |
3103 | |
3104 | case Builtin::BI__builtin_matrix_transpose: { |
3105 | const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
3106 | Value *MatValue = EmitScalarExpr(E->getArg(0)); |
3107 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3108 | Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(), |
3109 | MatrixTy->getNumColumns()); |
3110 | return RValue::get(Result); |
3111 | } |
3112 | |
3113 | case Builtin::BI__builtin_matrix_column_major_load: { |
3114 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3115 | |
3116 | Value *Stride = EmitScalarExpr(E->getArg(3)); |
3117 | const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>(); |
3118 | auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>(); |
3119 | assert(PtrTy && "arg0 must be of pointer type"); |
3120 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3121 | |
3122 | Address Src = EmitPointerWithAlignment(E->getArg(0)); |
3123 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(), |
3124 | E->getArg(0)->getExprLoc(), FD, 0); |
3125 | Value *Result = MB.CreateColumnMajorLoad( |
3126 | Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride, |
3127 | IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(), |
3128 | "matrix"); |
3129 | return RValue::get(Result); |
3130 | } |
3131 | |
3132 | case Builtin::BI__builtin_matrix_column_major_store: { |
3133 | MatrixBuilder<CGBuilderTy> MB(Builder); |
3134 | Value *Matrix = EmitScalarExpr(E->getArg(0)); |
3135 | Address Dst = EmitPointerWithAlignment(E->getArg(1)); |
3136 | Value *Stride = EmitScalarExpr(E->getArg(2)); |
3137 | |
3138 | const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); |
3139 | auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>(); |
3140 | assert(PtrTy && "arg1 must be of pointer type"); |
3141 | bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); |
3142 | |
3143 | EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(), |
3144 | E->getArg(1)->getExprLoc(), FD, 0); |
3145 | Value *Result = MB.CreateColumnMajorStore( |
3146 | Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()), |
3147 | Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns()); |
3148 | return RValue::get(Result); |
3149 | } |
3150 | |
3151 | case Builtin::BIfinite: |
3152 | case Builtin::BI__finite: |
3153 | case Builtin::BIfinitef: |
3154 | case Builtin::BI__finitef: |
3155 | case Builtin::BIfinitel: |
3156 | case Builtin::BI__finitel: |
3157 | case Builtin::BI__builtin_isinf: |
3158 | case Builtin::BI__builtin_isfinite: { |
3159 | |
3160 | |
3161 | |
3162 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3163 | Value *V = EmitScalarExpr(E->getArg(0)); |
3164 | llvm::Type *Ty = V->getType(); |
3165 | if (!Builder.getIsFPConstrained() || |
3166 | Builder.getDefaultConstrainedExcept() == fp::ebIgnore || |
3167 | !Ty->isIEEE()) { |
3168 | Value *Fabs = EmitFAbs(*this, V); |
3169 | Constant *Infinity = ConstantFP::getInfinity(V->getType()); |
3170 | CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf) |
3171 | ? CmpInst::FCMP_OEQ |
3172 | : CmpInst::FCMP_ONE; |
3173 | Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf"); |
3174 | return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType()))); |
3175 | } |
3176 | |
3177 | if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM)) |
3178 | return RValue::get(Result); |
3179 | |
3180 | |
3181 | |
3182 | |
3183 | unsigned bitsize = Ty->getScalarSizeInBits(); |
3184 | llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize); |
3185 | Value *IntV = Builder.CreateBitCast(V, IntTy); |
3186 | Value *Shl1 = Builder.CreateShl(IntV, 1); |
3187 | const llvm::fltSemantics &Semantics = Ty->getFltSemantics(); |
3188 | APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt(); |
3189 | Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1)); |
3190 | if (BuiltinID == Builtin::BI__builtin_isinf) |
3191 | V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1); |
3192 | else |
3193 | V = Builder.CreateICmpULT(Shl1, ExpMaskShl1); |
3194 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3195 | } |
3196 | |
3197 | case Builtin::BI__builtin_isinf_sign: { |
3198 | |
3199 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3200 | |
3201 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
3202 | Value *AbsArg = EmitFAbs(*this, Arg); |
3203 | Value *IsInf = Builder.CreateFCmpOEQ( |
3204 | AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); |
3205 | Value *IsNeg = EmitSignBit(*this, Arg); |
3206 | |
3207 | llvm::Type *IntTy = ConvertType(E->getType()); |
3208 | Value *Zero = Constant::getNullValue(IntTy); |
3209 | Value *One = ConstantInt::get(IntTy, 1); |
3210 | Value *NegativeOne = ConstantInt::get(IntTy, -1); |
3211 | Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); |
3212 | Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); |
3213 | return RValue::get(Result); |
3214 | } |
3215 | |
3216 | case Builtin::BI__builtin_isnormal: { |
3217 | |
3218 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3219 | |
3220 | Value *V = EmitScalarExpr(E->getArg(0)); |
3221 | Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); |
3222 | |
3223 | Value *Abs = EmitFAbs(*this, V); |
3224 | Value *IsLessThanInf = |
3225 | Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); |
3226 | APFloat Smallest = APFloat::getSmallestNormalized( |
3227 | getContext().getFloatTypeSemantics(E->getArg(0)->getType())); |
3228 | Value *IsNormal = |
3229 | Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), |
3230 | "isnormal"); |
3231 | V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); |
3232 | V = Builder.CreateAnd(V, IsNormal, "and"); |
3233 | return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); |
3234 | } |
3235 | |
3236 | case Builtin::BI__builtin_flt_rounds: { |
3237 | Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds); |
3238 | |
3239 | llvm::Type *ResultType = ConvertType(E->getType()); |
3240 | Value *Result = Builder.CreateCall(F); |
3241 | if (Result->getType() != ResultType) |
3242 | Result = Builder.CreateIntCast(Result, ResultType, true, |
3243 | "cast"); |
3244 | return RValue::get(Result); |
3245 | } |
3246 | |
3247 | case Builtin::BI__builtin_fpclassify: { |
3248 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
3249 | |
3250 | Value *V = EmitScalarExpr(E->getArg(5)); |
3251 | llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); |
3252 | |
3253 | |
3254 | BasicBlock *Begin = Builder.GetInsertBlock(); |
3255 | BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); |
3256 | Builder.SetInsertPoint(End); |
3257 | PHINode *Result = |
3258 | Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, |
3259 | "fpclassify_result"); |
3260 | |
3261 | |
3262 | Builder.SetInsertPoint(Begin); |
3263 | Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), |
3264 | "iszero"); |
3265 | Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); |
3266 | BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); |
3267 | Builder.CreateCondBr(IsZero, End, NotZero); |
3268 | Result->addIncoming(ZeroLiteral, Begin); |
3269 | |
3270 | |
3271 | Builder.SetInsertPoint(NotZero); |
3272 | Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); |
3273 | Value *NanLiteral = EmitScalarExpr(E->getArg(0)); |
3274 | BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); |
3275 | Builder.CreateCondBr(IsNan, End, NotNan); |
3276 | Result->addIncoming(NanLiteral, NotZero); |
3277 | |
3278 | |
3279 | Builder.SetInsertPoint(NotNan); |
3280 | Value *VAbs = EmitFAbs(*this, V); |
3281 | Value *IsInf = |
3282 | Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), |
3283 | "isinf"); |
3284 | Value *InfLiteral = EmitScalarExpr(E->getArg(1)); |
3285 | BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); |
3286 | Builder.CreateCondBr(IsInf, End, NotInf); |
3287 | Result->addIncoming(InfLiteral, NotNan); |
3288 | |
3289 | |
3290 | Builder.SetInsertPoint(NotInf); |
3291 | APFloat Smallest = APFloat::getSmallestNormalized( |
3292 | getContext().getFloatTypeSemantics(E->getArg(5)->getType())); |
3293 | Value *IsNormal = |
3294 | Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), |
3295 | "isnormal"); |
3296 | Value *NormalResult = |
3297 | Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), |
3298 | EmitScalarExpr(E->getArg(3))); |
3299 | Builder.CreateBr(End); |
3300 | Result->addIncoming(NormalResult, NotInf); |
3301 | |
3302 | |
3303 | Builder.SetInsertPoint(End); |
3304 | return RValue::get(Result); |
3305 | } |
3306 | |
3307 | case Builtin::BIalloca: |
3308 | case Builtin::BI_alloca: |
3309 | case Builtin::BI__builtin_alloca: { |
3310 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3311 | const TargetInfo &TI = getContext().getTargetInfo(); |
3312 | |
3313 | const Align SuitableAlignmentInBytes = |
3314 | CGM.getContext() |
3315 | .toCharUnitsFromBits(TI.getSuitableAlign()) |
3316 | .getAsAlign(); |
3317 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3318 | AI->setAlignment(SuitableAlignmentInBytes); |
3319 | initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes); |
3320 | return RValue::get(AI); |
3321 | } |
3322 | |
3323 | case Builtin::BI__builtin_alloca_with_align: { |
3324 | Value *Size = EmitScalarExpr(E->getArg(0)); |
3325 | Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); |
3326 | auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue); |
3327 | unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); |
3328 | const Align AlignmentInBytes = |
3329 | CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign(); |
3330 | AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size); |
3331 | AI->setAlignment(AlignmentInBytes); |
3332 | initializeAlloca(*this, AI, Size, AlignmentInBytes); |
3333 | return RValue::get(AI); |
3334 | } |
3335 | |
3336 | case Builtin::BIbzero: |
3337 | case Builtin::BI__builtin_bzero: { |
3338 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3339 | Value *SizeVal = EmitScalarExpr(E->getArg(1)); |
3340 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3341 | E->getArg(0)->getExprLoc(), FD, 0); |
3342 | Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false); |
3343 | return RValue::get(nullptr); |
3344 | } |
3345 | case Builtin::BImemcpy: |
3346 | case Builtin::BI__builtin_memcpy: |
3347 | case Builtin::BImempcpy: |
3348 | case Builtin::BI__builtin_mempcpy: { |
3349 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3350 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3351 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3352 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3353 | E->getArg(0)->getExprLoc(), FD, 0); |
3354 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3355 | E->getArg(1)->getExprLoc(), FD, 1); |
3356 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3357 | if (BuiltinID == Builtin::BImempcpy || |
3358 | BuiltinID == Builtin::BI__builtin_mempcpy) |
3359 | return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(), |
3360 | Dest.getPointer(), SizeVal)); |
3361 | else |
3362 | return RValue::get(Dest.getPointer()); |
3363 | } |
3364 | |
3365 | case Builtin::BI__builtin_memcpy_inline: { |
3366 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3367 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3368 | uint64_t Size = |
3369 | E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
3370 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3371 | E->getArg(0)->getExprLoc(), FD, 0); |
3372 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3373 | E->getArg(1)->getExprLoc(), FD, 1); |
3374 | Builder.CreateMemCpyInline(Dest, Src, Size); |
3375 | return RValue::get(nullptr); |
3376 | } |
3377 | |
3378 | case Builtin::BI__builtin_char_memchr: |
3379 | BuiltinID = Builtin::BI__builtin_memchr; |
3380 | break; |
3381 | |
3382 | case Builtin::BI__builtin___memcpy_chk: { |
3383 | |
3384 | Expr::EvalResult SizeResult, DstSizeResult; |
3385 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3386 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3387 | break; |
3388 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3389 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3390 | if (Size.ugt(DstSize)) |
3391 | break; |
3392 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3393 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3394 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3395 | Builder.CreateMemCpy(Dest, Src, SizeVal, false); |
3396 | return RValue::get(Dest.getPointer()); |
3397 | } |
3398 | |
3399 | case Builtin::BI__builtin_objc_memmove_collectable: { |
3400 | Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); |
3401 | Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); |
3402 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3403 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, |
3404 | DestAddr, SrcAddr, SizeVal); |
3405 | return RValue::get(DestAddr.getPointer()); |
3406 | } |
3407 | |
3408 | case Builtin::BI__builtin___memmove_chk: { |
3409 | |
3410 | Expr::EvalResult SizeResult, DstSizeResult; |
3411 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3412 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3413 | break; |
3414 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3415 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3416 | if (Size.ugt(DstSize)) |
3417 | break; |
3418 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3419 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3420 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3421 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3422 | return RValue::get(Dest.getPointer()); |
3423 | } |
3424 | |
3425 | case Builtin::BImemmove: |
3426 | case Builtin::BI__builtin_memmove: { |
3427 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3428 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
3429 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3430 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3431 | E->getArg(0)->getExprLoc(), FD, 0); |
3432 | EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), |
3433 | E->getArg(1)->getExprLoc(), FD, 1); |
3434 | Builder.CreateMemMove(Dest, Src, SizeVal, false); |
3435 | return RValue::get(Dest.getPointer()); |
3436 | } |
3437 | case Builtin::BImemset: |
3438 | case Builtin::BI__builtin_memset: { |
3439 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3440 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3441 | Builder.getInt8Ty()); |
3442 | Value *SizeVal = EmitScalarExpr(E->getArg(2)); |
3443 | EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), |
3444 | E->getArg(0)->getExprLoc(), FD, 0); |
3445 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3446 | return RValue::get(Dest.getPointer()); |
3447 | } |
3448 | case Builtin::BI__builtin___memset_chk: { |
3449 | |
3450 | Expr::EvalResult SizeResult, DstSizeResult; |
3451 | if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || |
3452 | !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) |
3453 | break; |
3454 | llvm::APSInt Size = SizeResult.Val.getInt(); |
3455 | llvm::APSInt DstSize = DstSizeResult.Val.getInt(); |
3456 | if (Size.ugt(DstSize)) |
3457 | break; |
3458 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
3459 | Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), |
3460 | Builder.getInt8Ty()); |
3461 | Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); |
3462 | Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); |
3463 | return RValue::get(Dest.getPointer()); |
3464 | } |
3465 | case Builtin::BI__builtin_wmemchr: { |
3466 | |
3467 | |
3468 | if (!getTarget().getTriple().isOSMSVCRT()) |
3469 | break; |
3470 | |
3471 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3472 | Value *Str = EmitScalarExpr(E->getArg(0)); |
3473 | Value *Chr = EmitScalarExpr(E->getArg(1)); |
3474 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3475 | |
3476 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3477 | BasicBlock *CmpEq = createBasicBlock("wmemchr.eq"); |
3478 | BasicBlock *Next = createBasicBlock("wmemchr.next"); |
3479 | BasicBlock *Exit = createBasicBlock("wmemchr.exit"); |
3480 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3481 | Builder.CreateCondBr(SizeEq0, Exit, CmpEq); |
3482 | |
3483 | EmitBlock(CmpEq); |
3484 | PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2); |
3485 | StrPhi->addIncoming(Str, Entry); |
3486 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3487 | SizePhi->addIncoming(Size, Entry); |
3488 | CharUnits WCharAlign = |
3489 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3490 | Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign); |
3491 | Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0); |
3492 | Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr); |
3493 | Builder.CreateCondBr(StrEqChr, Exit, Next); |
3494 | |
3495 | EmitBlock(Next); |
3496 | Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1); |
3497 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3498 | Value *NextSizeEq0 = |
3499 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3500 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq); |
3501 | StrPhi->addIncoming(NextStr, Next); |
3502 | SizePhi->addIncoming(NextSize, Next); |
3503 | |
3504 | EmitBlock(Exit); |
3505 | PHINode *Ret = Builder.CreatePHI(Str->getType(), 3); |
3506 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry); |
3507 | Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next); |
3508 | Ret->addIncoming(FoundChr, CmpEq); |
3509 | return RValue::get(Ret); |
3510 | } |
3511 | case Builtin::BI__builtin_wmemcmp: { |
3512 | |
3513 | |
3514 | if (!getTarget().getTriple().isOSMSVCRT()) |
3515 | break; |
3516 | |
3517 | llvm::Type *WCharTy = ConvertType(getContext().WCharTy); |
3518 | |
3519 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
3520 | Value *Src = EmitScalarExpr(E->getArg(1)); |
3521 | Value *Size = EmitScalarExpr(E->getArg(2)); |
3522 | |
3523 | BasicBlock *Entry = Builder.GetInsertBlock(); |
3524 | BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt"); |
3525 | BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt"); |
3526 | BasicBlock *Next = createBasicBlock("wmemcmp.next"); |
3527 | BasicBlock *Exit = createBasicBlock("wmemcmp.exit"); |
3528 | Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0)); |
3529 | Builder.CreateCondBr(SizeEq0, Exit, CmpGT); |
3530 | |
3531 | EmitBlock(CmpGT); |
3532 | PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); |
3533 | DstPhi->addIncoming(Dst, Entry); |
3534 | PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); |
3535 | SrcPhi->addIncoming(Src, Entry); |
3536 | PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2); |
3537 | SizePhi->addIncoming(Size, Entry); |
3538 | CharUnits WCharAlign = |
3539 | getContext().getTypeAlignInChars(getContext().WCharTy); |
3540 | Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign); |
3541 | Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign); |
3542 | Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh); |
3543 | Builder.CreateCondBr(DstGtSrc, Exit, CmpLT); |
3544 | |
3545 | EmitBlock(CmpLT); |
3546 | Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh); |
3547 | Builder.CreateCondBr(DstLtSrc, Exit, Next); |
3548 | |
3549 | EmitBlock(Next); |
3550 | Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1); |
3551 | Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1); |
3552 | Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1)); |
3553 | Value *NextSizeEq0 = |
3554 | Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0)); |
3555 | Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT); |
3556 | DstPhi->addIncoming(NextDst, Next); |
3557 | SrcPhi->addIncoming(NextSrc, Next); |
3558 | SizePhi->addIncoming(NextSize, Next); |
3559 | |
3560 | EmitBlock(Exit); |
3561 | PHINode *Ret = Builder.CreatePHI(IntTy, 4); |
3562 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); |
3563 | Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); |
3564 | Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); |
3565 | Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); |
3566 | return RValue::get(Ret); |
3567 | } |
3568 | case Builtin::BI__builtin_dwarf_cfa: { |
3569 | |
3570 | |
3571 | |
3572 | |
3573 | |
3574 | |
3575 | |
3576 | |
3577 | int32_t Offset = 0; |
3578 | |
3579 | Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); |
3580 | return RValue::get(Builder.CreateCall(F, |
3581 | llvm::ConstantInt::get(Int32Ty, Offset))); |
3582 | } |
3583 | case Builtin::BI__builtin_return_address: { |
3584 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3585 | getContext().UnsignedIntTy); |
3586 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3587 | return RValue::get(Builder.CreateCall(F, Depth)); |
3588 | } |
3589 | case Builtin::BI_ReturnAddress: { |
3590 | Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); |
3591 | return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); |
3592 | } |
3593 | case Builtin::BI__builtin_frame_address: { |
3594 | Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), |
3595 | getContext().UnsignedIntTy); |
3596 | Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy); |
3597 | return RValue::get(Builder.CreateCall(F, Depth)); |
3598 | } |
3599 | case Builtin::BI__builtin_extract_return_addr: { |
3600 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3601 | Value *Result = getTargetHooks().decodeReturnAddress(*this, Address); |
3602 | return RValue::get(Result); |
3603 | } |
3604 | case Builtin::BI__builtin_frob_return_addr: { |
3605 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3606 | Value *Result = getTargetHooks().encodeReturnAddress(*this, Address); |
3607 | return RValue::get(Result); |
3608 | } |
3609 | case Builtin::BI__builtin_dwarf_sp_column: { |
3610 | llvm::IntegerType *Ty |
3611 | = cast<llvm::IntegerType>(ConvertType(E->getType())); |
3612 | int Column = getTargetHooks().getDwarfEHStackPointer(CGM); |
3613 | if (Column == -1) { |
3614 | CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); |
3615 | return RValue::get(llvm::UndefValue::get(Ty)); |
3616 | } |
3617 | return RValue::get(llvm::ConstantInt::get(Ty, Column, true)); |
3618 | } |
3619 | case Builtin::BI__builtin_init_dwarf_reg_size_table: { |
3620 | Value *Address = EmitScalarExpr(E->getArg(0)); |
3621 | if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address)) |
3622 | CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table"); |
3623 | return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); |
3624 | } |
3625 | case Builtin::BI__builtin_eh_return: { |
3626 | Value *Int = EmitScalarExpr(E->getArg(0)); |
3627 | Value *Ptr = EmitScalarExpr(E->getArg(1)); |
3628 | |
3629 | llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); |
3630 | assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && |
3631 | "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); |
3632 | Function *F = |
3633 | CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 |
3634 | : Intrinsic::eh_return_i64); |
3635 | Builder.CreateCall(F, {Int, Ptr}); |
3636 | Builder.CreateUnreachable(); |
3637 | |
3638 | |
3639 | EmitBlock(createBasicBlock("builtin_eh_return.cont")); |
3640 | |
3641 | return RValue::get(nullptr); |
3642 | } |
3643 | case Builtin::BI__builtin_unwind_init: { |
3644 | Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); |
3645 | return RValue::get(Builder.CreateCall(F)); |
3646 | } |
3647 | case Builtin::BI__builtin_extend_pointer: { |
3648 | |
3649 | |
3650 | |
3651 | |
3652 | |
3653 | |
3654 | |
3655 | |
3656 | |
3657 | |
3658 | |
3659 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3660 | Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast"); |
3661 | |
3662 | |
3663 | if (IntPtrTy->getBitWidth() == 64) |
3664 | return RValue::get(Result); |
3665 | |
3666 | |
3667 | if (getTargetHooks().extendPointerWithSExt()) |
3668 | return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); |
3669 | else |
3670 | return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); |
3671 | } |
3672 | case Builtin::BI__builtin_setjmp: { |
3673 | |
3674 | Address Buf = EmitPointerWithAlignment(E->getArg(0)); |
3675 | |
3676 | |
3677 | Value *FrameAddr = Builder.CreateCall( |
3678 | CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy), |
3679 | ConstantInt::get(Int32Ty, 0)); |
3680 | Builder.CreateStore(FrameAddr, Buf); |
3681 | |
3682 | |
3683 | Value *StackAddr = |
3684 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); |
3685 | Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2); |
3686 | Builder.CreateStore(StackAddr, StackSaveSlot); |
3687 | |
3688 | |
3689 | Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); |
3690 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
3691 | return RValue::get(Builder.CreateCall(F, Buf.getPointer())); |
3692 | } |
3693 | case Builtin::BI__builtin_longjmp: { |
3694 | Value *Buf = EmitScalarExpr(E->getArg(0)); |
3695 | Buf = Builder.CreateBitCast(Buf, Int8PtrTy); |
3696 | |
3697 | |
3698 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); |
3699 | |
3700 | |
3701 | Builder.CreateUnreachable(); |
3702 | |
3703 | |
3704 | EmitBlock(createBasicBlock("longjmp.cont")); |
3705 | |
3706 | return RValue::get(nullptr); |
3707 | } |
3708 | case Builtin::BI__builtin_launder: { |
3709 | const Expr *Arg = E->getArg(0); |
3710 | QualType ArgTy = Arg->getType()->getPointeeType(); |
3711 | Value *Ptr = EmitScalarExpr(Arg); |
3712 | if (TypeRequiresBuiltinLaunder(CGM, ArgTy)) |
3713 | Ptr = Builder.CreateLaunderInvariantGroup(Ptr); |
3714 | |
3715 | return RValue::get(Ptr); |
3716 | } |
3717 | case Builtin::BI__sync_fetch_and_add: |
3718 | case Builtin::BI__sync_fetch_and_sub: |
3719 | case Builtin::BI__sync_fetch_and_or: |
3720 | case Builtin::BI__sync_fetch_and_and: |
3721 | case Builtin::BI__sync_fetch_and_xor: |
3722 | case Builtin::BI__sync_fetch_and_nand: |
3723 | case Builtin::BI__sync_add_and_fetch: |
3724 | case Builtin::BI__sync_sub_and_fetch: |
3725 | case Builtin::BI__sync_and_and_fetch: |
3726 | case Builtin::BI__sync_or_and_fetch: |
3727 | case Builtin::BI__sync_xor_and_fetch: |
3728 | case Builtin::BI__sync_nand_and_fetch: |
3729 | case Builtin::BI__sync_val_compare_and_swap: |
3730 | case Builtin::BI__sync_bool_compare_and_swap: |
3731 | case Builtin::BI__sync_lock_test_and_set: |
3732 | case Builtin::BI__sync_lock_release: |
3733 | case Builtin::BI__sync_swap: |
3734 | llvm_unreachable("Shouldn't make it through sema"); |
3735 | case Builtin::BI__sync_fetch_and_add_1: |
3736 | case Builtin::BI__sync_fetch_and_add_2: |
3737 | case Builtin::BI__sync_fetch_and_add_4: |
3738 | case Builtin::BI__sync_fetch_and_add_8: |
3739 | case Builtin::BI__sync_fetch_and_add_16: |
3740 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E); |
3741 | case Builtin::BI__sync_fetch_and_sub_1: |
3742 | case Builtin::BI__sync_fetch_and_sub_2: |
3743 | case Builtin::BI__sync_fetch_and_sub_4: |
3744 | case Builtin::BI__sync_fetch_and_sub_8: |
3745 | case Builtin::BI__sync_fetch_and_sub_16: |
3746 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E); |
3747 | case Builtin::BI__sync_fetch_and_or_1: |
3748 | case Builtin::BI__sync_fetch_and_or_2: |
3749 | case Builtin::BI__sync_fetch_and_or_4: |
3750 | case Builtin::BI__sync_fetch_and_or_8: |
3751 | case Builtin::BI__sync_fetch_and_or_16: |
3752 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E); |
3753 | case Builtin::BI__sync_fetch_and_and_1: |
3754 | case Builtin::BI__sync_fetch_and_and_2: |
3755 | case Builtin::BI__sync_fetch_and_and_4: |
3756 | case Builtin::BI__sync_fetch_and_and_8: |
3757 | case Builtin::BI__sync_fetch_and_and_16: |
3758 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E); |
3759 | case Builtin::BI__sync_fetch_and_xor_1: |
3760 | case Builtin::BI__sync_fetch_and_xor_2: |
3761 | case Builtin::BI__sync_fetch_and_xor_4: |
3762 | case Builtin::BI__sync_fetch_and_xor_8: |
3763 | case Builtin::BI__sync_fetch_and_xor_16: |
3764 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E); |
3765 | case Builtin::BI__sync_fetch_and_nand_1: |
3766 | case Builtin::BI__sync_fetch_and_nand_2: |
3767 | case Builtin::BI__sync_fetch_and_nand_4: |
3768 | case Builtin::BI__sync_fetch_and_nand_8: |
3769 | case Builtin::BI__sync_fetch_and_nand_16: |
3770 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E); |
3771 | |
3772 | |
3773 | case Builtin::BI__sync_fetch_and_min: |
3774 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E); |
3775 | case Builtin::BI__sync_fetch_and_max: |
3776 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E); |
3777 | case Builtin::BI__sync_fetch_and_umin: |
3778 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E); |
3779 | case Builtin::BI__sync_fetch_and_umax: |
3780 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E); |
3781 | |
3782 | case Builtin::BI__sync_add_and_fetch_1: |
3783 | case Builtin::BI__sync_add_and_fetch_2: |
3784 | case Builtin::BI__sync_add_and_fetch_4: |
3785 | case Builtin::BI__sync_add_and_fetch_8: |
3786 | case Builtin::BI__sync_add_and_fetch_16: |
3787 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E, |
3788 | llvm::Instruction::Add); |
3789 | case Builtin::BI__sync_sub_and_fetch_1: |
3790 | case Builtin::BI__sync_sub_and_fetch_2: |
3791 | case Builtin::BI__sync_sub_and_fetch_4: |
3792 | case Builtin::BI__sync_sub_and_fetch_8: |
3793 | case Builtin::BI__sync_sub_and_fetch_16: |
3794 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E, |
3795 | llvm::Instruction::Sub); |
3796 | case Builtin::BI__sync_and_and_fetch_1: |
3797 | case Builtin::BI__sync_and_and_fetch_2: |
3798 | case Builtin::BI__sync_and_and_fetch_4: |
3799 | case Builtin::BI__sync_and_and_fetch_8: |
3800 | case Builtin::BI__sync_and_and_fetch_16: |
3801 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E, |
3802 | llvm::Instruction::And); |
3803 | case Builtin::BI__sync_or_and_fetch_1: |
3804 | case Builtin::BI__sync_or_and_fetch_2: |
3805 | case Builtin::BI__sync_or_and_fetch_4: |
3806 | case Builtin::BI__sync_or_and_fetch_8: |
3807 | case Builtin::BI__sync_or_and_fetch_16: |
3808 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E, |
3809 | llvm::Instruction::Or); |
3810 | case Builtin::BI__sync_xor_and_fetch_1: |
3811 | case Builtin::BI__sync_xor_and_fetch_2: |
3812 | case Builtin::BI__sync_xor_and_fetch_4: |
3813 | case Builtin::BI__sync_xor_and_fetch_8: |
3814 | case Builtin::BI__sync_xor_and_fetch_16: |
3815 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E, |
3816 | llvm::Instruction::Xor); |
3817 | case Builtin::BI__sync_nand_and_fetch_1: |
3818 | case Builtin::BI__sync_nand_and_fetch_2: |
3819 | case Builtin::BI__sync_nand_and_fetch_4: |
3820 | case Builtin::BI__sync_nand_and_fetch_8: |
3821 | case Builtin::BI__sync_nand_and_fetch_16: |
3822 | return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E, |
3823 | llvm::Instruction::And, true); |
3824 | |
3825 | case Builtin::BI__sync_val_compare_and_swap_1: |
3826 | case Builtin::BI__sync_val_compare_and_swap_2: |
3827 | case Builtin::BI__sync_val_compare_and_swap_4: |
3828 | case Builtin::BI__sync_val_compare_and_swap_8: |
3829 | case Builtin::BI__sync_val_compare_and_swap_16: |
3830 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, false)); |
3831 | |
3832 | case Builtin::BI__sync_bool_compare_and_swap_1: |
3833 | case Builtin::BI__sync_bool_compare_and_swap_2: |
3834 | case Builtin::BI__sync_bool_compare_and_swap_4: |
3835 | case Builtin::BI__sync_bool_compare_and_swap_8: |
3836 | case Builtin::BI__sync_bool_compare_and_swap_16: |
3837 | return RValue::get(MakeAtomicCmpXchgValue(*this, E, true)); |
3838 | |
3839 | case Builtin::BI__sync_swap_1: |
3840 | case Builtin::BI__sync_swap_2: |
3841 | case Builtin::BI__sync_swap_4: |
3842 | case Builtin::BI__sync_swap_8: |
3843 | case Builtin::BI__sync_swap_16: |
3844 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
3845 | |
3846 | case Builtin::BI__sync_lock_test_and_set_1: |
3847 | case Builtin::BI__sync_lock_test_and_set_2: |
3848 | case Builtin::BI__sync_lock_test_and_set_4: |
3849 | case Builtin::BI__sync_lock_test_and_set_8: |
3850 | case Builtin::BI__sync_lock_test_and_set_16: |
3851 | return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E); |
3852 | |
3853 | case Builtin::BI__sync_lock_release_1: |
3854 | case Builtin::BI__sync_lock_release_2: |
3855 | case Builtin::BI__sync_lock_release_4: |
3856 | case Builtin::BI__sync_lock_release_8: |
3857 | case Builtin::BI__sync_lock_release_16: { |
3858 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3859 | QualType ElTy = E->getArg(0)->getType()->getPointeeType(); |
3860 | CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); |
3861 | llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), |
3862 | StoreSize.getQuantity() * 8); |
3863 | Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); |
3864 | llvm::StoreInst *Store = |
3865 | Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, |
3866 | StoreSize); |
3867 | Store->setAtomic(llvm::AtomicOrdering::Release); |
3868 | return RValue::get(nullptr); |
3869 | } |
3870 | |
3871 | case Builtin::BI__sync_synchronize: { |
3872 | |
3873 | |
3874 | |
3875 | |
3876 | |
3877 | |
3878 | |
3879 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent); |
3880 | return RValue::get(nullptr); |
3881 | } |
3882 | |
3883 | case Builtin::BI__builtin_nontemporal_load: |
3884 | return RValue::get(EmitNontemporalLoad(*this, E)); |
3885 | case Builtin::BI__builtin_nontemporal_store: |
3886 | return RValue::get(EmitNontemporalStore(*this, E)); |
3887 | case Builtin::BI__c11_atomic_is_lock_free: |
3888 | case Builtin::BI__atomic_is_lock_free: { |
3889 | |
3890 | |
3891 | |
3892 | const char *LibCallName = "__atomic_is_lock_free"; |
3893 | CallArgList Args; |
3894 | Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), |
3895 | getContext().getSizeType()); |
3896 | if (BuiltinID == Builtin::BI__atomic_is_lock_free) |
3897 | Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), |
3898 | getContext().VoidPtrTy); |
3899 | else |
3900 | Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)), |
3901 | getContext().VoidPtrTy); |
3902 | const CGFunctionInfo &FuncInfo = |
3903 | CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); |
3904 | llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); |
3905 | llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName); |
3906 | return EmitCall(FuncInfo, CGCallee::forDirect(Func), |
3907 | ReturnValueSlot(), Args); |
3908 | } |
3909 | |
3910 | case Builtin::BI__atomic_test_and_set: { |
3911 | |
3912 | |
3913 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
3914 | bool Volatile = |
3915 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
3916 | |
3917 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
3918 | unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace(); |
3919 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
3920 | Value *NewVal = Builder.getInt8(1); |
3921 | Value *Order = EmitScalarExpr(E->getArg(1)); |
3922 | if (isa<llvm::ConstantInt>(Order)) { |
3923 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
3924 | AtomicRMWInst *Result = nullptr; |
3925 | switch (ord) { |
3926 | case 0: |
3927 | default: |
3928 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3929 | llvm::AtomicOrdering::Monotonic); |
3930 | break; |
3931 | case 1: |
3932 | case 2: |
3933 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3934 | llvm::AtomicOrdering::Acquire); |
3935 | break; |
3936 | case 3: |
3937 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3938 | llvm::AtomicOrdering::Release); |
3939 | break; |
3940 | case 4: |
3941 | |
3942 | Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3943 | llvm::AtomicOrdering::AcquireRelease); |
3944 | break; |
3945 | case 5: |
3946 | Result = Builder.CreateAtomicRMW( |
3947 | llvm::AtomicRMWInst::Xchg, Ptr, NewVal, |
3948 | llvm::AtomicOrdering::SequentiallyConsistent); |
3949 | break; |
3950 | } |
3951 | Result->setVolatile(Volatile); |
3952 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
3953 | } |
3954 | |
3955 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
3956 | |
3957 | llvm::BasicBlock *BBs[5] = { |
3958 | createBasicBlock("monotonic", CurFn), |
3959 | createBasicBlock("acquire", CurFn), |
3960 | createBasicBlock("release", CurFn), |
3961 | createBasicBlock("acqrel", CurFn), |
3962 | createBasicBlock("seqcst", CurFn) |
3963 | }; |
3964 | llvm::AtomicOrdering Orders[5] = { |
3965 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, |
3966 | llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, |
3967 | llvm::AtomicOrdering::SequentiallyConsistent}; |
3968 | |
3969 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
3970 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
3971 | |
3972 | Builder.SetInsertPoint(ContBB); |
3973 | PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set"); |
3974 | |
3975 | for (unsigned i = 0; i < 5; ++i) { |
3976 | Builder.SetInsertPoint(BBs[i]); |
3977 | AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, |
3978 | Ptr, NewVal, Orders[i]); |
3979 | RMW->setVolatile(Volatile); |
3980 | Result->addIncoming(RMW, BBs[i]); |
3981 | Builder.CreateBr(ContBB); |
3982 | } |
3983 | |
3984 | SI->addCase(Builder.getInt32(0), BBs[0]); |
3985 | SI->addCase(Builder.getInt32(1), BBs[1]); |
3986 | SI->addCase(Builder.getInt32(2), BBs[1]); |
3987 | SI->addCase(Builder.getInt32(3), BBs[2]); |
3988 | SI->addCase(Builder.getInt32(4), BBs[3]); |
3989 | SI->addCase(Builder.getInt32(5), BBs[4]); |
3990 | |
3991 | Builder.SetInsertPoint(ContBB); |
3992 | return RValue::get(Builder.CreateIsNotNull(Result, "tobool")); |
3993 | } |
3994 | |
3995 | case Builtin::BI__atomic_clear: { |
3996 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
3997 | bool Volatile = |
3998 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
3999 | |
4000 | Address Ptr = EmitPointerWithAlignment(E->getArg(0)); |
4001 | unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace(); |
4002 | Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace)); |
4003 | Value *NewVal = Builder.getInt8(0); |
4004 | Value *Order = EmitScalarExpr(E->getArg(1)); |
4005 | if (isa<llvm::ConstantInt>(Order)) { |
4006 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4007 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
4008 | switch (ord) { |
4009 | case 0: |
4010 | default: |
4011 | Store->setOrdering(llvm::AtomicOrdering::Monotonic); |
4012 | break; |
4013 | case 3: |
4014 | Store->setOrdering(llvm::AtomicOrdering::Release); |
4015 | break; |
4016 | case 5: |
4017 | Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); |
4018 | break; |
4019 | } |
4020 | return RValue::get(nullptr); |
4021 | } |
4022 | |
4023 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4024 | |
4025 | llvm::BasicBlock *BBs[3] = { |
4026 | createBasicBlock("monotonic", CurFn), |
4027 | createBasicBlock("release", CurFn), |
4028 | createBasicBlock("seqcst", CurFn) |
4029 | }; |
4030 | llvm::AtomicOrdering Orders[3] = { |
4031 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, |
4032 | llvm::AtomicOrdering::SequentiallyConsistent}; |
4033 | |
4034 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4035 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]); |
4036 | |
4037 | for (unsigned i = 0; i < 3; ++i) { |
4038 | Builder.SetInsertPoint(BBs[i]); |
4039 | StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile); |
4040 | Store->setOrdering(Orders[i]); |
4041 | Builder.CreateBr(ContBB); |
4042 | } |
4043 | |
4044 | SI->addCase(Builder.getInt32(0), BBs[0]); |
4045 | SI->addCase(Builder.getInt32(3), BBs[1]); |
4046 | SI->addCase(Builder.getInt32(5), BBs[2]); |
4047 | |
4048 | Builder.SetInsertPoint(ContBB); |
4049 | return RValue::get(nullptr); |
4050 | } |
4051 | |
4052 | case Builtin::BI__atomic_thread_fence: |
4053 | case Builtin::BI__atomic_signal_fence: |
4054 | case Builtin::BI__c11_atomic_thread_fence: |
4055 | case Builtin::BI__c11_atomic_signal_fence: { |
4056 | llvm::SyncScope::ID SSID; |
4057 | if (BuiltinID == Builtin::BI__atomic_signal_fence || |
4058 | BuiltinID == Builtin::BI__c11_atomic_signal_fence) |
4059 | SSID = llvm::SyncScope::SingleThread; |
4060 | else |
4061 | SSID = llvm::SyncScope::System; |
4062 | Value *Order = EmitScalarExpr(E->getArg(0)); |
4063 | if (isa<llvm::ConstantInt>(Order)) { |
4064 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
4065 | switch (ord) { |
4066 | case 0: |
4067 | default: |
4068 | break; |
4069 | case 1: |
4070 | case 2: |
4071 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4072 | break; |
4073 | case 3: |
4074 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4075 | break; |
4076 | case 4: |
4077 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4078 | break; |
4079 | case 5: |
4080 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4081 | break; |
4082 | } |
4083 | return RValue::get(nullptr); |
4084 | } |
4085 | |
4086 | llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB; |
4087 | AcquireBB = createBasicBlock("acquire", CurFn); |
4088 | ReleaseBB = createBasicBlock("release", CurFn); |
4089 | AcqRelBB = createBasicBlock("acqrel", CurFn); |
4090 | SeqCstBB = createBasicBlock("seqcst", CurFn); |
4091 | llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); |
4092 | |
4093 | Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); |
4094 | llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB); |
4095 | |
4096 | Builder.SetInsertPoint(AcquireBB); |
4097 | Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); |
4098 | Builder.CreateBr(ContBB); |
4099 | SI->addCase(Builder.getInt32(1), AcquireBB); |
4100 | SI->addCase(Builder.getInt32(2), AcquireBB); |
4101 | |
4102 | Builder.SetInsertPoint(ReleaseBB); |
4103 | Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); |
4104 | Builder.CreateBr(ContBB); |
4105 | SI->addCase(Builder.getInt32(3), ReleaseBB); |
4106 | |
4107 | Builder.SetInsertPoint(AcqRelBB); |
4108 | Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); |
4109 | Builder.CreateBr(ContBB); |
4110 | SI->addCase(Builder.getInt32(4), AcqRelBB); |
4111 | |
4112 | Builder.SetInsertPoint(SeqCstBB); |
4113 | Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); |
4114 | Builder.CreateBr(ContBB); |
4115 | SI->addCase(Builder.getInt32(5), SeqCstBB); |
4116 | |
4117 | Builder.SetInsertPoint(ContBB); |
4118 | return RValue::get(nullptr); |
4119 | } |
4120 | |
4121 | case Builtin::BI__builtin_signbit: |
4122 | case Builtin::BI__builtin_signbitf: |
4123 | case Builtin::BI__builtin_signbitl: { |
4124 | return RValue::get( |
4125 | Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), |
4126 | ConvertType(E->getType()))); |
4127 | } |
4128 | case Builtin::BI__warn_memset_zero_len: |
4129 | return RValue::getIgnored(); |
4130 | case Builtin::BI__annotation: { |
4131 | |
4132 | SmallVector<Metadata *, 1> Strings; |
4133 | for (const Expr *Arg : E->arguments()) { |
4134 | const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); |
4135 | assert(Str->getCharByteWidth() == 2); |
4136 | StringRef WideBytes = Str->getBytes(); |
4137 | std::string StrUtf8; |
4138 | if (!convertUTF16ToUTF8String( |
4139 | makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) { |
4140 | CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); |
4141 | continue; |
4142 | } |
4143 | Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8)); |
4144 | } |
4145 | |
4146 | |
4147 | llvm::Function *F = |
4148 | CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {}); |
4149 | MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings); |
4150 | Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple)); |
4151 | return RValue::getIgnored(); |
4152 | } |
4153 | case Builtin::BI__builtin_annotation: { |
4154 | llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); |
4155 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, |
4156 | AnnVal->getType()); |
4157 | |
4158 | |
4159 | |
4160 | const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); |
4161 | StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); |
4162 | return RValue::get( |
4163 | EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr)); |
4164 | } |
4165 | case Builtin::BI__builtin_addcb: |
4166 | case Builtin::BI__builtin_addcs: |
4167 | case Builtin::BI__builtin_addc: |
4168 | case Builtin::BI__builtin_addcl: |
4169 | case Builtin::BI__builtin_addcll: |
4170 | case Builtin::BI__builtin_subcb: |
4171 | case Builtin::BI__builtin_subcs: |
4172 | case Builtin::BI__builtin_subc: |
4173 | case Builtin::BI__builtin_subcl: |
4174 | case Builtin::BI__builtin_subcll: { |
4175 | |
4176 | |
4177 | |
4178 | |
4179 | |
4180 | |
4181 | |
4182 | |
4183 | |
4184 | |
4185 | |
4186 | |
4187 | |
4188 | |
4189 | |
4190 | |
4191 | |
4192 | |
4193 | |
4194 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4195 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4196 | llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); |
4197 | Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
4198 | |
4199 | |
4200 | llvm::Intrinsic::ID IntrinsicId; |
4201 | switch (BuiltinID) { |
4202 | default: llvm_unreachable("Unknown multiprecision builtin id."); |
4203 | case Builtin::BI__builtin_addcb: |
4204 | case Builtin::BI__builtin_addcs: |
4205 | case Builtin::BI__builtin_addc: |
4206 | case Builtin::BI__builtin_addcl: |
4207 | case Builtin::BI__builtin_addcll: |
4208 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4209 | break; |
4210 | case Builtin::BI__builtin_subcb: |
4211 | case Builtin::BI__builtin_subcs: |
4212 | case Builtin::BI__builtin_subc: |
4213 | case Builtin::BI__builtin_subcl: |
4214 | case Builtin::BI__builtin_subcll: |
4215 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4216 | break; |
4217 | } |
4218 | |
4219 | |
4220 | llvm::Value *Carry1; |
4221 | llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4222 | X, Y, Carry1); |
4223 | llvm::Value *Carry2; |
4224 | llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, |
4225 | Sum1, Carryin, Carry2); |
4226 | llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), |
4227 | X->getType()); |
4228 | Builder.CreateStore(CarryOut, CarryOutPtr); |
4229 | return RValue::get(Sum2); |
4230 | } |
4231 | |
4232 | case Builtin::BI__builtin_add_overflow: |
4233 | case Builtin::BI__builtin_sub_overflow: |
4234 | case Builtin::BI__builtin_mul_overflow: { |
4235 | const clang::Expr *LeftArg = E->getArg(0); |
4236 | const clang::Expr *RightArg = E->getArg(1); |
4237 | const clang::Expr *ResultArg = E->getArg(2); |
4238 | |
4239 | clang::QualType ResultQTy = |
4240 | ResultArg->getType()->castAs<PointerType>()->getPointeeType(); |
4241 | |
4242 | WidthAndSignedness LeftInfo = |
4243 | getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); |
4244 | WidthAndSignedness RightInfo = |
4245 | getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); |
4246 | WidthAndSignedness ResultInfo = |
4247 | getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy); |
4248 | |
4249 | |
4250 | |
4251 | if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo)) |
4252 | return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg, |
4253 | RightInfo, ResultArg, ResultQTy, |
4254 | ResultInfo); |
4255 | |
4256 | if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo, |
4257 | ResultInfo)) |
4258 | return EmitCheckedUnsignedMultiplySignedResult( |
4259 | *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy, |
4260 | ResultInfo); |
4261 | |
4262 | WidthAndSignedness EncompassingInfo = |
4263 | EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo}); |
4264 | |
4265 | llvm::Type *EncompassingLLVMTy = |
4266 | llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width); |
4267 | |
4268 | llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy); |
4269 | |
4270 | llvm::Intrinsic::ID IntrinsicId; |
4271 | switch (BuiltinID) { |
4272 | default: |
4273 | llvm_unreachable("Unknown overflow builtin id."); |
4274 | case Builtin::BI__builtin_add_overflow: |
4275 | IntrinsicId = EncompassingInfo.Signed |
4276 | ? llvm::Intrinsic::sadd_with_overflow |
4277 | : llvm::Intrinsic::uadd_with_overflow; |
4278 | break; |
4279 | case Builtin::BI__builtin_sub_overflow: |
4280 | IntrinsicId = EncompassingInfo.Signed |
4281 | ? llvm::Intrinsic::ssub_with_overflow |
4282 | : llvm::Intrinsic::usub_with_overflow; |
4283 | break; |
4284 | case Builtin::BI__builtin_mul_overflow: |
4285 | IntrinsicId = EncompassingInfo.Signed |
4286 | ? llvm::Intrinsic::smul_with_overflow |
4287 | : llvm::Intrinsic::umul_with_overflow; |
4288 | break; |
4289 | } |
4290 | |
4291 | llvm::Value *Left = EmitScalarExpr(LeftArg); |
4292 | llvm::Value *Right = EmitScalarExpr(RightArg); |
4293 | Address ResultPtr = EmitPointerWithAlignment(ResultArg); |
4294 | |
4295 | |
4296 | Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed); |
4297 | Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed); |
4298 | |
4299 | |
4300 | llvm::Value *Overflow, *Result; |
4301 | Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow); |
4302 | |
4303 | if (EncompassingInfo.Width > ResultInfo.Width) { |
4304 | |
4305 | |
4306 | llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy); |
4307 | |
4308 | |
4309 | |
4310 | llvm::Value *ResultTruncExt = Builder.CreateIntCast( |
4311 | ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed); |
4312 | llvm::Value *TruncationOverflow = |
4313 | Builder.CreateICmpNE(Result, ResultTruncExt); |
4314 | |
4315 | Overflow = Builder.CreateOr(Overflow, TruncationOverflow); |
4316 | Result = ResultTrunc; |
4317 | } |
4318 | |
4319 | |
4320 | bool isVolatile = |
4321 | ResultArg->getType()->getPointeeType().isVolatileQualified(); |
4322 | Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); |
4323 | |
4324 | return RValue::get(Overflow); |
4325 | } |
4326 | |
4327 | case Builtin::BI__builtin_uadd_overflow: |
4328 | case Builtin::BI__builtin_uaddl_overflow: |
4329 | case Builtin::BI__builtin_uaddll_overflow: |
4330 | case Builtin::BI__builtin_usub_overflow: |
4331 | case Builtin::BI__builtin_usubl_overflow: |
4332 | case Builtin::BI__builtin_usubll_overflow: |
4333 | case Builtin::BI__builtin_umul_overflow: |
4334 | case Builtin::BI__builtin_umull_overflow: |
4335 | case Builtin::BI__builtin_umulll_overflow: |
4336 | case Builtin::BI__builtin_sadd_overflow: |
4337 | case Builtin::BI__builtin_saddl_overflow: |
4338 | case Builtin::BI__builtin_saddll_overflow: |
4339 | case Builtin::BI__builtin_ssub_overflow: |
4340 | case Builtin::BI__builtin_ssubl_overflow: |
4341 | case Builtin::BI__builtin_ssubll_overflow: |
4342 | case Builtin::BI__builtin_smul_overflow: |
4343 | case Builtin::BI__builtin_smull_overflow: |
4344 | case Builtin::BI__builtin_smulll_overflow: { |
4345 | |
4346 | |
4347 | |
4348 | |
4349 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
4350 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
4351 | Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
4352 | |
4353 | |
4354 | llvm::Intrinsic::ID IntrinsicId; |
4355 | switch (BuiltinID) { |
4356 | default: llvm_unreachable("Unknown overflow builtin id."); |
4357 | case Builtin::BI__builtin_uadd_overflow: |
4358 | case Builtin::BI__builtin_uaddl_overflow: |
4359 | case Builtin::BI__builtin_uaddll_overflow: |
4360 | IntrinsicId = llvm::Intrinsic::uadd_with_overflow; |
4361 | break; |
4362 | case Builtin::BI__builtin_usub_overflow: |
4363 | case Builtin::BI__builtin_usubl_overflow: |
4364 | case Builtin::BI__builtin_usubll_overflow: |
4365 | IntrinsicId = llvm::Intrinsic::usub_with_overflow; |
4366 | break; |
4367 | case Builtin::BI__builtin_umul_overflow: |
4368 | case Builtin::BI__builtin_umull_overflow: |
4369 | case Builtin::BI__builtin_umulll_overflow: |
4370 | IntrinsicId = llvm::Intrinsic::umul_with_overflow; |
4371 | break; |
4372 | case Builtin::BI__builtin_sadd_overflow: |
4373 | case Builtin::BI__builtin_saddl_overflow: |
4374 | case Builtin::BI__builtin_saddll_overflow: |
4375 | IntrinsicId = llvm::Intrinsic::sadd_with_overflow; |
4376 | break; |
4377 | case Builtin::BI__builtin_ssub_overflow: |
4378 | case Builtin::BI__builtin_ssubl_overflow: |
4379 | case Builtin::BI__builtin_ssubll_overflow: |
4380 | IntrinsicId = llvm::Intrinsic::ssub_with_overflow; |
4381 | break; |
4382 | case Builtin::BI__builtin_smul_overflow: |
4383 | case Builtin::BI__builtin_smull_overflow: |
4384 | case Builtin::BI__builtin_smulll_overflow: |
4385 | IntrinsicId = llvm::Intrinsic::smul_with_overflow; |
4386 | break; |
4387 | } |
4388 | |
4389 | |
4390 | llvm::Value *Carry; |
4391 | llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); |
4392 | Builder.CreateStore(Sum, SumOutPtr); |
4393 | |
4394 | return RValue::get(Carry); |
4395 | } |
4396 | case Builtin::BI__builtin_addressof: |
4397 | return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); |
4398 | case Builtin::BI__builtin_operator_new: |
4399 | return EmitBuiltinNewDeleteCall( |
4400 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); |
4401 | case Builtin::BI__builtin_operator_delete: |
4402 | return EmitBuiltinNewDeleteCall( |
4403 | E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); |
4404 | |
4405 | case Builtin::BI__builtin_is_aligned: |
4406 | return EmitBuiltinIsAligned(E); |
4407 | case Builtin::BI__builtin_align_up: |
4408 | return EmitBuiltinAlignTo(E, true); |
4409 | case Builtin::BI__builtin_align_down: |
4410 | return EmitBuiltinAlignTo(E, false); |
4411 | |
4412 | case Builtin::BI__noop: |
4413 | |
4414 | return RValue::get(ConstantInt::get(IntTy, 0)); |
4415 | case Builtin::BI__builtin_call_with_static_chain: { |
4416 | const CallExpr *Call = cast<CallExpr>(E->getArg(0)); |
4417 | const Expr *Chain = E->getArg(1); |
4418 | return EmitCall(Call->getCallee()->getType(), |
4419 | EmitCallee(Call->getCallee()), Call, ReturnValue, |
4420 | EmitScalarExpr(Chain)); |
4421 | } |
4422 | case Builtin::BI_InterlockedExchange8: |
4423 | case Builtin::BI_InterlockedExchange16: |
4424 | case Builtin::BI_InterlockedExchange: |
4425 | case Builtin::BI_InterlockedExchangePointer: |
4426 | return RValue::get( |
4427 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E)); |
4428 | case Builtin::BI_InterlockedCompareExchangePointer: |
4429 | case Builtin::BI_InterlockedCompareExchangePointer_nf: { |
4430 | llvm::Type *RTy; |
4431 | llvm::IntegerType *IntType = |
4432 | IntegerType::get(getLLVMContext(), |
4433 | getContext().getTypeSize(E->getType())); |
4434 | llvm::Type *IntPtrType = IntType->getPointerTo(); |
4435 | |
4436 | llvm::Value *Destination = |
4437 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); |
4438 | |
4439 | llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); |
4440 | RTy = Exchange->getType(); |
4441 | Exchange = Builder.CreatePtrToInt(Exchange, IntType); |
4442 | |
4443 | llvm::Value *Comparand = |
4444 | Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); |
4445 | |
4446 | auto Ordering = |
4447 | BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? |
4448 | AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; |
4449 | |
4450 | auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, |
4451 | Ordering, Ordering); |
4452 | Result->setVolatile(true); |
4453 | |
4454 | return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, |
4455 | 0), |
4456 | RTy)); |
4457 | } |
4458 | case Builtin::BI_InterlockedCompareExchange8: |
4459 | case Builtin::BI_InterlockedCompareExchange16: |
4460 | case Builtin::BI_InterlockedCompareExchange: |
4461 | case Builtin::BI_InterlockedCompareExchange64: |
4462 | return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E)); |
4463 | case Builtin::BI_InterlockedIncrement16: |
4464 | case Builtin::BI_InterlockedIncrement: |
4465 | return RValue::get( |
4466 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E)); |
4467 | case Builtin::BI_InterlockedDecrement16: |
4468 | case Builtin::BI_InterlockedDecrement: |
4469 | return RValue::get( |
4470 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E)); |
4471 | case Builtin::BI_InterlockedAnd8: |
4472 | case Builtin::BI_InterlockedAnd16: |
4473 | case Builtin::BI_InterlockedAnd: |
4474 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E)); |
4475 | case Builtin::BI_InterlockedExchangeAdd8: |
4476 | case Builtin::BI_InterlockedExchangeAdd16: |
4477 | case Builtin::BI_InterlockedExchangeAdd: |
4478 | return RValue::get( |
4479 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E)); |
4480 | case Builtin::BI_InterlockedExchangeSub8: |
4481 | case Builtin::BI_InterlockedExchangeSub16: |
4482 | case Builtin::BI_InterlockedExchangeSub: |
4483 | return RValue::get( |
4484 | EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E)); |
4485 | case Builtin::BI_InterlockedOr8: |
4486 | case Builtin::BI_InterlockedOr16: |
4487 | case Builtin::BI_InterlockedOr: |
4488 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E)); |
4489 | case Builtin::BI_InterlockedXor8: |
4490 | case Builtin::BI_InterlockedXor16: |
4491 | case Builtin::BI_InterlockedXor: |
4492 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E)); |
4493 | |
4494 | case Builtin::BI_bittest64: |
4495 | case Builtin::BI_bittest: |
4496 | case Builtin::BI_bittestandcomplement64: |
4497 | case Builtin::BI_bittestandcomplement: |
4498 | case Builtin::BI_bittestandreset64: |
4499 | case Builtin::BI_bittestandreset: |
4500 | case Builtin::BI_bittestandset64: |
4501 | case Builtin::BI_bittestandset: |
4502 | case Builtin::BI_interlockedbittestandreset: |
4503 | case Builtin::BI_interlockedbittestandreset64: |
4504 | case Builtin::BI_interlockedbittestandset64: |
4505 | case Builtin::BI_interlockedbittestandset: |
4506 | case Builtin::BI_interlockedbittestandset_acq: |
4507 | case Builtin::BI_interlockedbittestandset_rel: |
4508 | case Builtin::BI_interlockedbittestandset_nf: |
4509 | case Builtin::BI_interlockedbittestandreset_acq: |
4510 | case Builtin::BI_interlockedbittestandreset_rel: |
4511 | case Builtin::BI_interlockedbittestandreset_nf: |
4512 | return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E)); |
4513 | |
4514 | |
4515 | |
4516 | case Builtin::BI__iso_volatile_load8: |
4517 | case Builtin::BI__iso_volatile_load16: |
4518 | case Builtin::BI__iso_volatile_load32: |
4519 | case Builtin::BI__iso_volatile_load64: |
4520 | return RValue::get(EmitISOVolatileLoad(*this, E)); |
4521 | case Builtin::BI__iso_volatile_store8: |
4522 | case Builtin::BI__iso_volatile_store16: |
4523 | case Builtin::BI__iso_volatile_store32: |
4524 | case Builtin::BI__iso_volatile_store64: |
4525 | return RValue::get(EmitISOVolatileStore(*this, E)); |
4526 | |
4527 | case Builtin::BI__exception_code: |
4528 | case Builtin::BI_exception_code: |
4529 | return RValue::get(EmitSEHExceptionCode()); |
4530 | case Builtin::BI__exception_info: |
4531 | case Builtin::BI_exception_info: |
4532 | return RValue::get(EmitSEHExceptionInfo()); |
4533 | case Builtin::BI__abnormal_termination: |
4534 | case Builtin::BI_abnormal_termination: |
4535 | return RValue::get(EmitSEHAbnormalTermination()); |
4536 | case Builtin::BI_setjmpex: |
4537 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4538 | E->getArg(0)->getType()->isPointerType()) |
4539 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4540 | break; |
4541 | case Builtin::BI_setjmp: |
4542 | if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && |
4543 | E->getArg(0)->getType()->isPointerType()) { |
4544 | if (getTarget().getTriple().getArch() == llvm::Triple::x86) |
4545 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E); |
4546 | else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64) |
4547 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E); |
4548 | return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E); |
4549 | } |
4550 | break; |
4551 | |
4552 | case Builtin::BI__GetExceptionInfo: { |
4553 | if (llvm::GlobalVariable *GV = |
4554 | CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) |
4555 | return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); |
4556 | break; |
4557 | } |
4558 | |
4559 | case Builtin::BI__fastfail: |
4560 | return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E)); |
4561 | |
4562 | case Builtin::BI__builtin_coro_size: { |
4563 | auto & Context = getContext(); |
4564 | auto SizeTy = Context.getSizeType(); |
4565 | auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
4566 | Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T); |
4567 | return RValue::get(Builder.CreateCall(F)); |
4568 | } |
4569 | |
4570 | case Builtin::BI__builtin_coro_id: |
4571 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_id); |
4572 | case Builtin::BI__builtin_coro_promise: |
4573 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise); |
4574 | case Builtin::BI__builtin_coro_resume: |
4575 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume); |
4576 | case Builtin::BI__builtin_coro_frame: |
4577 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame); |
4578 | case Builtin::BI__builtin_coro_noop: |
4579 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop); |
4580 | case Builtin::BI__builtin_coro_free: |
4581 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_free); |
4582 | case Builtin::BI__builtin_coro_destroy: |
4583 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy); |
4584 | case Builtin::BI__builtin_coro_done: |
4585 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_done); |
4586 | case Builtin::BI__builtin_coro_alloc: |
4587 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc); |
4588 | case Builtin::BI__builtin_coro_begin: |
4589 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin); |
4590 | case Builtin::BI__builtin_coro_end: |
4591 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_end); |
4592 | case Builtin::BI__builtin_coro_suspend: |
4593 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend); |
4594 | case Builtin::BI__builtin_coro_param: |
4595 | return EmitCoroutineIntrinsic(E, Intrinsic::coro_param); |
4596 | |
4597 | |
4598 | case Builtin::BIread_pipe: |
4599 | case Builtin::BIwrite_pipe: { |
4600 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4601 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4602 | CGOpenCLRuntime OpenCLRT(CGM); |
4603 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4604 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4605 | |
4606 | |
4607 | unsigned GenericAS = |
4608 | getContext().getTargetAddressSpace(LangAS::opencl_generic); |
4609 | llvm::Type *I8PTy = llvm::PointerType::get( |
4610 | llvm::Type::getInt8Ty(getLLVMContext()), GenericAS); |
4611 | |
4612 | |
4613 | if (2U == E->getNumArgs()) { |
4614 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2" |
4615 | : "__write_pipe_2"; |
4616 | |
4617 | |
4618 | llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; |
4619 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4620 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4621 | Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy); |
4622 | return RValue::get( |
4623 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4624 | {Arg0, BCast, PacketSize, PacketAlign})); |
4625 | } else { |
4626 | assert(4 == E->getNumArgs() && |
4627 | "Illegal number of parameters to pipe function"); |
4628 | const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" |
4629 | : "__write_pipe_4"; |
4630 | |
4631 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, |
4632 | Int32Ty, Int32Ty}; |
4633 | Value *Arg2 = EmitScalarExpr(E->getArg(2)), |
4634 | *Arg3 = EmitScalarExpr(E->getArg(3)); |
4635 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4636 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4637 | Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy); |
4638 | |
4639 | |
4640 | if (Arg2->getType() != Int32Ty) |
4641 | Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty); |
4642 | return RValue::get( |
4643 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4644 | {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign})); |
4645 | } |
4646 | } |
4647 | |
4648 | |
4649 | case Builtin::BIreserve_read_pipe: |
4650 | case Builtin::BIreserve_write_pipe: |
4651 | case Builtin::BIwork_group_reserve_read_pipe: |
4652 | case Builtin::BIwork_group_reserve_write_pipe: |
4653 | case Builtin::BIsub_group_reserve_read_pipe: |
4654 | case Builtin::BIsub_group_reserve_write_pipe: { |
4655 | |
4656 | const char *Name; |
4657 | if (BuiltinID == Builtin::BIreserve_read_pipe) |
4658 | Name = "__reserve_read_pipe"; |
4659 | else if (BuiltinID == Builtin::BIreserve_write_pipe) |
4660 | Name = "__reserve_write_pipe"; |
4661 | else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe) |
4662 | Name = "__work_group_reserve_read_pipe"; |
4663 | else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe) |
4664 | Name = "__work_group_reserve_write_pipe"; |
4665 | else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe) |
4666 | Name = "__sub_group_reserve_read_pipe"; |
4667 | else |
4668 | Name = "__sub_group_reserve_write_pipe"; |
4669 | |
4670 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4671 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4672 | llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy); |
4673 | CGOpenCLRuntime OpenCLRT(CGM); |
4674 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4675 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4676 | |
4677 | |
4678 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; |
4679 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4680 | ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4681 | |
4682 | |
4683 | if (Arg1->getType() != Int32Ty) |
4684 | Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty); |
4685 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4686 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4687 | } |
4688 | |
4689 | |
4690 | case Builtin::BIcommit_read_pipe: |
4691 | case Builtin::BIcommit_write_pipe: |
4692 | case Builtin::BIwork_group_commit_read_pipe: |
4693 | case Builtin::BIwork_group_commit_write_pipe: |
4694 | case Builtin::BIsub_group_commit_read_pipe: |
4695 | case Builtin::BIsub_group_commit_write_pipe: { |
4696 | const char *Name; |
4697 | if (BuiltinID == Builtin::BIcommit_read_pipe) |
4698 | Name = "__commit_read_pipe"; |
4699 | else if (BuiltinID == Builtin::BIcommit_write_pipe) |
4700 | Name = "__commit_write_pipe"; |
4701 | else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe) |
4702 | Name = "__work_group_commit_read_pipe"; |
4703 | else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe) |
4704 | Name = "__work_group_commit_write_pipe"; |
4705 | else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe) |
4706 | Name = "__sub_group_commit_read_pipe"; |
4707 | else |
4708 | Name = "__sub_group_commit_write_pipe"; |
4709 | |
4710 | Value *Arg0 = EmitScalarExpr(E->getArg(0)), |
4711 | *Arg1 = EmitScalarExpr(E->getArg(1)); |
4712 | CGOpenCLRuntime OpenCLRT(CGM); |
4713 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4714 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4715 | |
4716 | |
4717 | llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; |
4718 | llvm::FunctionType *FTy = |
4719 | llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), |
4720 | llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4721 | |
4722 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4723 | {Arg0, Arg1, PacketSize, PacketAlign})); |
4724 | } |
4725 | |
4726 | case Builtin::BIget_pipe_num_packets: |
4727 | case Builtin::BIget_pipe_max_packets: { |
4728 | const char *BaseName; |
4729 | const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>(); |
4730 | if (BuiltinID == Builtin::BIget_pipe_num_packets) |
4731 | BaseName = "__get_pipe_num_packets"; |
4732 | else |
4733 | BaseName = "__get_pipe_max_packets"; |
4734 | std::string Name = std::string(BaseName) + |
4735 | std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); |
4736 | |
4737 | |
4738 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
4739 | CGOpenCLRuntime OpenCLRT(CGM); |
4740 | Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); |
4741 | Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); |
4742 | llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; |
4743 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4744 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4745 | |
4746 | return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4747 | {Arg0, PacketSize, PacketAlign})); |
4748 | } |
4749 | |
4750 | |
4751 | case Builtin::BIto_global: |
4752 | case Builtin::BIto_local: |
4753 | case Builtin::BIto_private: { |
4754 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
4755 | auto NewArgT = llvm::PointerType::get(Int8Ty, |
4756 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4757 | auto NewRetT = llvm::PointerType::get(Int8Ty, |
4758 | CGM.getContext().getTargetAddressSpace( |
4759 | E->getType()->getPointeeType().getAddressSpace())); |
4760 | auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); |
4761 | llvm::Value *NewArg; |
4762 | if (Arg0->getType()->getPointerAddressSpace() != |
4763 | NewArgT->getPointerAddressSpace()) |
4764 | NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT); |
4765 | else |
4766 | NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT); |
4767 | auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); |
4768 | auto NewCall = |
4769 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); |
4770 | return RValue::get(Builder.CreateBitOrPointerCast(NewCall, |
4771 | ConvertType(E->getType()))); |
4772 | } |
4773 | |
4774 | |
4775 | |
4776 | case Builtin::BIenqueue_kernel: { |
4777 | StringRef Name; |
4778 | unsigned NumArgs = E->getNumArgs(); |
4779 | |
4780 | llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy); |
4781 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4782 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4783 | |
4784 | llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); |
4785 | llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); |
4786 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); |
4787 | llvm::Value *Range = NDRangeL.getAddress(*this).getPointer(); |
4788 | llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType(); |
4789 | |
4790 | if (NumArgs == 4) { |
4791 | |
4792 | |
4793 | Name = "__enqueue_kernel_basic"; |
4794 | llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy, |
4795 | GenericVoidPtrTy}; |
4796 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4797 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4798 | |
4799 | auto Info = |
4800 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
4801 | llvm::Value *Kernel = |
4802 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4803 | llvm::Value *Block = |
4804 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4805 | |
4806 | AttrBuilder B; |
4807 | B.addByValAttr(NDRangeL.getAddress(*this).getElementType()); |
4808 | llvm::AttributeList ByValAttrSet = |
4809 | llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B); |
4810 | |
4811 | auto RTCall = |
4812 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet), |
4813 | {Queue, Flags, Range, Kernel, Block}); |
4814 | RTCall->setAttributes(ByValAttrSet); |
4815 | return RValue::get(RTCall); |
4816 | } |
4817 | assert(NumArgs >= 5 && "Invalid enqueue_kernel signature"); |
4818 | |
4819 | |
4820 | |
4821 | auto CreateArrayForSizeVar = [=](unsigned First) |
4822 | -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { |
4823 | llvm::APInt ArraySize(32, NumArgs - First); |
4824 | QualType SizeArrayTy = getContext().getConstantArrayType( |
4825 | getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal, |
4826 | 0); |
4827 | auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes"); |
4828 | llvm::Value *TmpPtr = Tmp.getPointer(); |
4829 | llvm::Value *TmpSize = EmitLifetimeStart( |
4830 | CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr); |
4831 | llvm::Value *ElemPtr; |
4832 | |
4833 | |
4834 | auto *Zero = llvm::ConstantInt::get(IntTy, 0); |
4835 | for (unsigned I = First; I < NumArgs; ++I) { |
4836 | auto *Index = llvm::ConstantInt::get(IntTy, I - First); |
4837 | auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr, |
4838 | {Zero, Index}); |
4839 | if (I == First) |
4840 | ElemPtr = GEP; |
4841 | auto *V = |
4842 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); |
4843 | Builder.CreateAlignedStore( |
4844 | V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy)); |
4845 | } |
4846 | return std::tie(ElemPtr, TmpSize, TmpPtr); |
4847 | }; |
4848 | |
4849 | |
4850 | if (E->getArg(3)->getType()->isBlockPointerType()) { |
4851 | |
4852 | Name = "__enqueue_kernel_varargs"; |
4853 | auto Info = |
4854 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); |
4855 | llvm::Value *Kernel = |
4856 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4857 | auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4858 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
4859 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4); |
4860 | |
4861 | |
4862 | |
4863 | llvm::Value *const Args[] = {Queue, Flags, |
4864 | Range, Kernel, |
4865 | Block, ConstantInt::get(IntTy, NumArgs - 4), |
4866 | ElemPtr}; |
4867 | llvm::Type *const ArgTys[] = { |
4868 | QueueTy, IntTy, RangeTy, GenericVoidPtrTy, |
4869 | GenericVoidPtrTy, IntTy, ElemPtr->getType()}; |
4870 | |
4871 | llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false); |
4872 | auto Call = RValue::get( |
4873 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args)); |
4874 | if (TmpSize) |
4875 | EmitLifetimeEnd(TmpSize, TmpPtr); |
4876 | return Call; |
4877 | } |
4878 | |
4879 | if (NumArgs >= 7) { |
4880 | llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy); |
4881 | llvm::PointerType *EventPtrTy = EventTy->getPointerTo( |
4882 | CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4883 | |
4884 | llvm::Value *NumEvents = |
4885 | Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); |
4886 | |
4887 | |
4888 | |
4889 | |
4890 | llvm::Value *EventWaitList = nullptr; |
4891 | if (E->getArg(4)->isNullPointerConstant( |
4892 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
4893 | EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy); |
4894 | } else { |
4895 | EventWaitList = E->getArg(4)->getType()->isArrayType() |
4896 | ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() |
4897 | : EmitScalarExpr(E->getArg(4)); |
4898 | |
4899 | EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy); |
4900 | } |
4901 | llvm::Value *EventRet = nullptr; |
4902 | if (E->getArg(5)->isNullPointerConstant( |
4903 | getContext(), Expr::NPC_ValueDependentIsNotNull)) { |
4904 | EventRet = llvm::ConstantPointerNull::get(EventPtrTy); |
4905 | } else { |
4906 | EventRet = |
4907 | Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy); |
4908 | } |
4909 | |
4910 | auto Info = |
4911 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); |
4912 | llvm::Value *Kernel = |
4913 | Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4914 | llvm::Value *Block = |
4915 | Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4916 | |
4917 | std::vector<llvm::Type *> ArgTys = { |
4918 | QueueTy, Int32Ty, RangeTy, Int32Ty, |
4919 | EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy}; |
4920 | |
4921 | std::vector<llvm::Value *> Args = {Queue, Flags, Range, |
4922 | NumEvents, EventWaitList, EventRet, |
4923 | Kernel, Block}; |
4924 | |
4925 | if (NumArgs == 7) { |
4926 | |
4927 | Name = "__enqueue_kernel_basic_events"; |
4928 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4929 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4930 | return RValue::get( |
4931 | EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4932 | llvm::ArrayRef<llvm::Value *>(Args))); |
4933 | } |
4934 | |
4935 | |
4936 | Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); |
4937 | ArgTys.push_back(Int32Ty); |
4938 | Name = "__enqueue_kernel_events_varargs"; |
4939 | |
4940 | llvm::Value *ElemPtr, *TmpSize, *TmpPtr; |
4941 | std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7); |
4942 | Args.push_back(ElemPtr); |
4943 | ArgTys.push_back(ElemPtr->getType()); |
4944 | |
4945 | llvm::FunctionType *FTy = llvm::FunctionType::get( |
4946 | Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false); |
4947 | auto Call = |
4948 | RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), |
4949 | llvm::ArrayRef<llvm::Value *>(Args))); |
4950 | if (TmpSize) |
4951 | EmitLifetimeEnd(TmpSize, TmpPtr); |
4952 | return Call; |
4953 | } |
4954 | LLVM_FALLTHROUGH; |
4955 | } |
4956 | |
4957 | |
4958 | case Builtin::BIget_kernel_work_group_size: { |
4959 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4960 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4961 | auto Info = |
4962 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
4963 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4964 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4965 | return RValue::get(EmitRuntimeCall( |
4966 | CGM.CreateRuntimeFunction( |
4967 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
4968 | false), |
4969 | "__get_kernel_work_group_size_impl"), |
4970 | {Kernel, Arg})); |
4971 | } |
4972 | case Builtin::BIget_kernel_preferred_work_group_size_multiple: { |
4973 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4974 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4975 | auto Info = |
4976 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); |
4977 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4978 | Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4979 | return RValue::get(EmitRuntimeCall( |
4980 | CGM.CreateRuntimeFunction( |
4981 | llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, |
4982 | false), |
4983 | "__get_kernel_preferred_work_group_size_multiple_impl"), |
4984 | {Kernel, Arg})); |
4985 | } |
4986 | case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: |
4987 | case Builtin::BIget_kernel_sub_group_count_for_ndrange: { |
4988 | llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy( |
4989 | getContext().getTargetAddressSpace(LangAS::opencl_generic)); |
4990 | LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); |
4991 | llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer(); |
4992 | auto Info = |
4993 | CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); |
4994 | Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy); |
4995 | Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy); |
4996 | const char *Name = |
4997 | BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange |
4998 | ? "__get_kernel_max_sub_group_size_for_ndrange_impl" |
4999 | : "__get_kernel_sub_group_count_for_ndrange_impl"; |
5000 | return RValue::get(EmitRuntimeCall( |
5001 | CGM.CreateRuntimeFunction( |
5002 | llvm::FunctionType::get( |
5003 | IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, |
5004 | false), |
5005 | Name), |
5006 | {NDRange, Kernel, Block})); |
5007 | } |
5008 | |
5009 | case Builtin::BI__builtin_store_half: |
5010 | case Builtin::BI__builtin_store_halff: { |
5011 | Value *Val = EmitScalarExpr(E->getArg(0)); |
5012 | Address Address = EmitPointerWithAlignment(E->getArg(1)); |
5013 | Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy()); |
5014 | return RValue::get(Builder.CreateStore(HalfVal, Address)); |
5015 | } |
5016 | case Builtin::BI__builtin_load_half: { |
5017 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
5018 | Value *HalfVal = Builder.CreateLoad(Address); |
5019 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy())); |
5020 | } |
5021 | case Builtin::BI__builtin_load_halff: { |
5022 | Address Address = EmitPointerWithAlignment(E->getArg(0)); |
5023 | Value *HalfVal = Builder.CreateLoad(Address); |
5024 | return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy())); |
5025 | } |
5026 | case Builtin::BIprintf: |
5027 | if (getTarget().getTriple().isNVPTX()) |
5028 | return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue); |
5029 | if (getTarget().getTriple().getArch() == Triple::amdgcn && |
5030 | getLangOpts().HIP) |
5031 | return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue); |
5032 | break; |
5033 | case Builtin::BI__builtin_canonicalize: |
5034 | case Builtin::BI__builtin_canonicalizef: |
5035 | case Builtin::BI__builtin_canonicalizef16: |
5036 | case Builtin::BI__builtin_canonicalizel: |
5037 | return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize)); |
5038 | |
5039 | case Builtin::BI__builtin_thread_pointer: { |
5040 | if (!getContext().getTargetInfo().isTLSSupported()) |
5041 | CGM.ErrorUnsupported(E, "__builtin_thread_pointer"); |
5042 | |
5043 | break; |
5044 | } |
5045 | case Builtin::BI__builtin_os_log_format: |
5046 | return emitBuiltinOSLogFormat(*E); |
5047 | |
5048 | case Builtin::BI__xray_customevent: { |
5049 | if (!ShouldXRayInstrumentFunction()) |
5050 | return RValue::getIgnored(); |
5051 | |
5052 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5053 | XRayInstrKind::Custom)) |
5054 | return RValue::getIgnored(); |
5055 | |
5056 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5057 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) |
5058 | return RValue::getIgnored(); |
5059 | |
5060 | Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent); |
5061 | auto FTy = F->getFunctionType(); |
5062 | auto Arg0 = E->getArg(0); |
5063 | auto Arg0Val = EmitScalarExpr(Arg0); |
5064 | auto Arg0Ty = Arg0->getType(); |
5065 | auto PTy0 = FTy->getParamType(0); |
5066 | if (PTy0 != Arg0Val->getType()) { |
5067 | if (Arg0Ty->isArrayType()) |
5068 | Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer(); |
5069 | else |
5070 | Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0); |
5071 | } |
5072 | auto Arg1 = EmitScalarExpr(E->getArg(1)); |
5073 | auto PTy1 = FTy->getParamType(1); |
5074 | if (PTy1 != Arg1->getType()) |
5075 | Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1); |
5076 | return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1})); |
5077 | } |
5078 | |
5079 | case Builtin::BI__xray_typedevent: { |
5080 | |
5081 | |
5082 | |
5083 | if (!ShouldXRayInstrumentFunction()) |
5084 | return RValue::getIgnored(); |
5085 | |
5086 | if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( |
5087 | XRayInstrKind::Typed)) |
5088 | return RValue::getIgnored(); |
5089 | |
5090 | if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) |
5091 | if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) |
5092 | return RValue::getIgnored(); |
5093 | |
5094 | Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent); |
5095 | auto FTy = F->getFunctionType(); |
5096 | auto Arg0 = EmitScalarExpr(E->getArg(0)); |
5097 | auto PTy0 = FTy->getParamType(0); |
5098 | if (PTy0 != Arg0->getType()) |
5099 | Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0); |
5100 | auto Arg1 = E->getArg(1); |
5101 | auto Arg1Val = EmitScalarExpr(Arg1); |
5102 | auto Arg1Ty = Arg1->getType(); |
5103 | auto PTy1 = FTy->getParamType(1); |
5104 | if (PTy1 != Arg1Val->getType()) { |
5105 | if (Arg1Ty->isArrayType()) |
5106 | Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer(); |
5107 | else |
5108 | Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1); |
5109 | } |
5110 | auto Arg2 = EmitScalarExpr(E->getArg(2)); |
5111 | auto PTy2 = FTy->getParamType(2); |
5112 | if (PTy2 != Arg2->getType()) |
5113 | Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2); |
5114 | return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2})); |
5115 | } |
5116 | |
5117 | case Builtin::BI__builtin_ms_va_start: |
5118 | case Builtin::BI__builtin_ms_va_end: |
5119 | return RValue::get( |
5120 | EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(), |
5121 | BuiltinID == Builtin::BI__builtin_ms_va_start)); |
5122 | |
5123 | case Builtin::BI__builtin_ms_va_copy: { |
5124 | |
5125 | |
5126 | |
5127 | |
5128 | |
5129 | |
5130 | Address DestAddr = EmitMSVAListRef(E->getArg(0)); |
5131 | Address SrcAddr = EmitMSVAListRef(E->getArg(1)); |
5132 | |
5133 | llvm::Type *BPP = Int8PtrPtrTy; |
5134 | |
5135 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"), |
5136 | DestAddr.getAlignment()); |
5137 | SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"), |
5138 | SrcAddr.getAlignment()); |
5139 | |
5140 | Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val"); |
5141 | return RValue::get(Builder.CreateStore(ArgPtr, DestAddr)); |
5142 | } |
5143 | |
5144 | case Builtin::BI__builtin_get_device_side_mangled_name: { |
5145 | auto Name = CGM.getCUDARuntime().getDeviceSideName( |
5146 | cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl()); |
5147 | auto Str = CGM.GetAddrOfConstantCString(Name, ""); |
5148 | llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0), |
5149 | llvm::ConstantInt::get(SizeTy, 0)}; |
5150 | auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(), |
5151 | Str.getPointer(), Zeros); |
5152 | return RValue::get(Ptr); |
5153 | } |
5154 | } |
5155 | |
5156 | |
5157 | |
5158 | |
5159 | if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) |
5160 | return emitLibraryCall(*this, FD, E, |
5161 | CGM.getBuiltinLibFunction(FD, BuiltinID)); |
5162 | |
5163 | |
5164 | |
5165 | if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) |
5166 | return emitLibraryCall(*this, FD, E, |
5167 | cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); |
5168 | |
5169 | |
5170 | |
5171 | |
5172 | |
5173 | |
5174 | checkTargetFeatures(E, FD); |
5175 | |
5176 | if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) |
5177 | LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); |
5178 | |
5179 | |
5180 | const char *Name = getContext().BuiltinInfo.getName(BuiltinID); |
5181 | Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic; |
5182 | StringRef Prefix = |
5183 | llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()); |
5184 | if (!Prefix.empty()) { |
5185 | IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name); |
5186 | |
5187 | |
5188 | |
5189 | if (IntrinsicID == Intrinsic::not_intrinsic) |
5190 | IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name); |
5191 | } |
5192 | |
5193 | if (IntrinsicID != Intrinsic::not_intrinsic) { |
5194 | SmallVector<Value*, 16> Args; |
5195 | |
5196 | |
5197 | |
5198 | unsigned ICEArguments = 0; |
5199 | ASTContext::GetBuiltinTypeError Error; |
5200 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
5201 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
5202 | |
5203 | Function *F = CGM.getIntrinsic(IntrinsicID); |
5204 | llvm::FunctionType *FTy = F->getFunctionType(); |
5205 | |
5206 | for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { |
5207 | Value *ArgValue; |
5208 | |
5209 | if ((ICEArguments & (1 << i)) == 0) { |
5210 | ArgValue = EmitScalarExpr(E->getArg(i)); |
5211 | } else { |
5212 | |
5213 | |
5214 | ArgValue = llvm::ConstantInt::get( |
5215 | getLLVMContext(), |
5216 | *E->getArg(i)->getIntegerConstantExpr(getContext())); |
5217 | } |
5218 | |
5219 | |
5220 | |
5221 | llvm::Type *PTy = FTy->getParamType(i); |
5222 | if (PTy != ArgValue->getType()) { |
5223 | |
5224 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) { |
5225 | if (PtrTy->getAddressSpace() != |
5226 | ArgValue->getType()->getPointerAddressSpace()) { |
5227 | ArgValue = Builder.CreateAddrSpaceCast( |
5228 | ArgValue, |
5229 | ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5230 | } |
5231 | } |
5232 | |
5233 | assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && |
5234 | "Must be able to losslessly bit cast to param"); |
5235 | ArgValue = Builder.CreateBitCast(ArgValue, PTy); |
5236 | } |
5237 | |
5238 | Args.push_back(ArgValue); |
5239 | } |
5240 | |
5241 | Value *V = Builder.CreateCall(F, Args); |
5242 | QualType BuiltinRetType = E->getType(); |
5243 | |
5244 | llvm::Type *RetTy = VoidTy; |
5245 | if (!BuiltinRetType->isVoidType()) |
5246 | RetTy = ConvertType(BuiltinRetType); |
5247 | |
5248 | if (RetTy != V->getType()) { |
5249 | |
5250 | if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) { |
5251 | if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { |
5252 | V = Builder.CreateAddrSpaceCast( |
5253 | V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); |
5254 | } |
5255 | } |
5256 | |
5257 | assert(V->getType()->canLosslesslyBitCastTo(RetTy) && |
5258 | "Must be able to losslessly bit cast result type"); |
5259 | V = Builder.CreateBitCast(V, RetTy); |
5260 | } |
5261 | |
5262 | return RValue::get(V); |
5263 | } |
5264 | |
5265 | |
5266 | |
5267 | |
5268 | |
5269 | TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); |
5270 | if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) { |
5271 | Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp"); |
5272 | ReturnValue = ReturnValueSlot(DestPtr, false); |
5273 | } |
5274 | |
5275 | |
5276 | if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { |
5277 | switch (EvalKind) { |
5278 | case TEK_Scalar: |
5279 | return RValue::get(V); |
5280 | case TEK_Aggregate: |
5281 | return RValue::getAggregate(ReturnValue.getValue(), |
5282 | ReturnValue.isVolatile()); |
5283 | case TEK_Complex: |
5284 | llvm_unreachable("No current target builtin returns complex"); |
5285 | } |
5286 | llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr"); |
5287 | } |
5288 | |
5289 | ErrorUnsupported(E, "builtin function"); |
5290 | |
5291 | |
5292 | return GetUndefRValue(E->getType()); |
5293 | } |
5294 | |
5295 | static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, |
5296 | unsigned BuiltinID, const CallExpr *E, |
5297 | ReturnValueSlot ReturnValue, |
5298 | llvm::Triple::ArchType Arch) { |
5299 | switch (Arch) { |
5300 | case llvm::Triple::arm: |
5301 | case llvm::Triple::armeb: |
5302 | case llvm::Triple::thumb: |
5303 | case llvm::Triple::thumbeb: |
5304 | return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch); |
5305 | case llvm::Triple::aarch64: |
5306 | case llvm::Triple::aarch64_32: |
5307 | case llvm::Triple::aarch64_be: |
5308 | return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); |
5309 | case llvm::Triple::bpfeb: |
5310 | case llvm::Triple::bpfel: |
5311 | return CGF->EmitBPFBuiltinExpr(BuiltinID, E); |
5312 | case llvm::Triple::x86: |
5313 | case llvm::Triple::x86_64: |
5314 | return CGF->EmitX86BuiltinExpr(BuiltinID, E); |
5315 | case llvm::Triple::ppc: |
5316 | case llvm::Triple::ppcle: |
5317 | case llvm::Triple::ppc64: |
5318 | case llvm::Triple::ppc64le: |
5319 | return CGF->EmitPPCBuiltinExpr(BuiltinID, E); |
5320 | case llvm::Triple::r600: |
5321 | case llvm::Triple::amdgcn: |
5322 | return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); |
5323 | case llvm::Triple::systemz: |
5324 | return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); |
5325 | case llvm::Triple::nvptx: |
5326 | case llvm::Triple::nvptx64: |
5327 | return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); |
5328 | case llvm::Triple::wasm32: |
5329 | case llvm::Triple::wasm64: |
5330 | return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); |
5331 | case llvm::Triple::hexagon: |
5332 | return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); |
5333 | case llvm::Triple::riscv32: |
5334 | case llvm::Triple::riscv64: |
5335 | return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue); |
5336 | default: |
5337 | return nullptr; |
5338 | } |
5339 | } |
5340 | |
5341 | Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, |
5342 | const CallExpr *E, |
5343 | ReturnValueSlot ReturnValue) { |
5344 | if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { |
5345 | assert(getContext().getAuxTargetInfo() && "Missing aux target info"); |
5346 | return EmitTargetArchBuiltinExpr( |
5347 | this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, |
5348 | ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); |
5349 | } |
5350 | |
5351 | return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, |
5352 | getTarget().getTriple().getArch()); |
5353 | } |
5354 | |
5355 | static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF, |
5356 | NeonTypeFlags TypeFlags, |
5357 | bool HasLegalHalfType = true, |
5358 | bool V1Ty = false, |
5359 | bool AllowBFloatArgsAndRet = true) { |
5360 | int IsQuad = TypeFlags.isQuad(); |
5361 | switch (TypeFlags.getEltType()) { |
5362 | case NeonTypeFlags::Int8: |
5363 | case NeonTypeFlags::Poly8: |
5364 | return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); |
5365 | case NeonTypeFlags::Int16: |
5366 | case NeonTypeFlags::Poly16: |
5367 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5368 | case NeonTypeFlags::BFloat16: |
5369 | if (AllowBFloatArgsAndRet) |
5370 | return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad)); |
5371 | else |
5372 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5373 | case NeonTypeFlags::Float16: |
5374 | if (HasLegalHalfType) |
5375 | return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); |
5376 | else |
5377 | return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); |
5378 | case NeonTypeFlags::Int32: |
5379 | return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); |
5380 | case NeonTypeFlags::Int64: |
5381 | case NeonTypeFlags::Poly64: |
5382 | return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); |
5383 | case NeonTypeFlags::Poly128: |
5384 | |
5385 | |
5386 | |
5387 | return llvm::FixedVectorType::get(CGF->Int8Ty, 16); |
5388 | case NeonTypeFlags::Float32: |
5389 | return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); |
5390 | case NeonTypeFlags::Float64: |
5391 | return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); |
5392 | } |
5393 | llvm_unreachable("Unknown vector element type!"); |
5394 | } |
5395 | |
5396 | static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF, |
5397 | NeonTypeFlags IntTypeFlags) { |
5398 | int IsQuad = IntTypeFlags.isQuad(); |
5399 | switch (IntTypeFlags.getEltType()) { |
5400 | case NeonTypeFlags::Int16: |
5401 | return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad)); |
5402 | case NeonTypeFlags::Int32: |
5403 | return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad)); |
5404 | case NeonTypeFlags::Int64: |
5405 | return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad)); |
5406 | default: |
5407 | llvm_unreachable("Type can't be converted to floating-point!"); |
5408 | } |
5409 | } |
5410 | |
5411 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, |
5412 | const ElementCount &Count) { |
5413 | Value *SV = llvm::ConstantVector::getSplat(Count, C); |
5414 | return Builder.CreateShuffleVector(V, V, SV, "lane"); |
5415 | } |
5416 | |
5417 | Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { |
5418 | ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount(); |
5419 | return EmitNeonSplat(V, C, EC); |
5420 | } |
5421 | |
5422 | Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, |
5423 | const char *name, |
5424 | unsigned shift, bool rightshift) { |
5425 | unsigned j = 0; |
5426 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
5427 | ai != ae; ++ai, ++j) { |
5428 | if (F->isConstrainedFPIntrinsic()) |
5429 | if (ai->getType()->isMetadataTy()) |
5430 | continue; |
5431 | if (shift > 0 && shift == j) |
5432 | Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); |
5433 | else |
5434 | Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); |
5435 | } |
5436 | |
5437 | if (F->isConstrainedFPIntrinsic()) |
5438 | return Builder.CreateConstrainedFPCall(F, Ops, name); |
5439 | else |
5440 | return Builder.CreateCall(F, Ops, name); |
5441 | } |
5442 | |
5443 | Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, |
5444 | bool neg) { |
5445 | int SV = cast<ConstantInt>(V)->getSExtValue(); |
5446 | return ConstantInt::get(Ty, neg ? -SV : SV); |
5447 | } |
5448 | |
5449 | |
5450 | Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift, |
5451 | llvm::Type *Ty, bool usgn, |
5452 | const char *name) { |
5453 | llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); |
5454 | |
5455 | int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); |
5456 | int EltSize = VTy->getScalarSizeInBits(); |
5457 | |
5458 | Vec = Builder.CreateBitCast(Vec, Ty); |
5459 | |
5460 | |
5461 | |
5462 | if (ShiftAmt == EltSize) { |
5463 | if (usgn) { |
5464 | |
5465 | return llvm::ConstantAggregateZero::get(VTy); |
5466 | } else { |
5467 | |
5468 | |
5469 | --ShiftAmt; |
5470 | Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); |
5471 | } |
5472 | } |
5473 | |
5474 | Shift = EmitNeonShiftVector(Shift, Ty, false); |
5475 | if (usgn) |
5476 | return Builder.CreateLShr(Vec, Shift, name); |
5477 | else |
5478 | return Builder.CreateAShr(Vec, Shift, name); |
5479 | } |
5480 | |
5481 | enum { |
5482 | AddRetType = (1 << 0), |
5483 | Add1ArgType = (1 << 1), |
5484 | Add2ArgTypes = (1 << 2), |
5485 | |
5486 | VectorizeRetType = (1 << 3), |
5487 | VectorizeArgTypes = (1 << 4), |
5488 | |
5489 | InventFloatType = (1 << 5), |
5490 | UnsignedAlts = (1 << 6), |
5491 | |
5492 | Use64BitVectors = (1 << 7), |
5493 | Use128BitVectors = (1 << 8), |
5494 | |
5495 | Vectorize1ArgType = Add1ArgType | VectorizeArgTypes, |
5496 | VectorRet = AddRetType | VectorizeRetType, |
5497 | VectorRetGetArgs01 = |
5498 | AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes, |
5499 | FpCmpzModifiers = |
5500 | AddRetType | VectorizeRetType | Add1ArgType | InventFloatType |
5501 | }; |
5502 | |
5503 | namespace { |
5504 | struct ARMVectorIntrinsicInfo { |
5505 | const char *NameHint; |
5506 | unsigned BuiltinID; |
5507 | unsigned LLVMIntrinsic; |
5508 | unsigned AltLLVMIntrinsic; |
5509 | uint64_t TypeModifier; |
5510 | |
5511 | bool operator<(unsigned RHSBuiltinID) const { |
5512 | return BuiltinID < RHSBuiltinID; |
5513 | } |
5514 | bool operator<(const ARMVectorIntrinsicInfo &TE) const { |
5515 | return BuiltinID < TE.BuiltinID; |
5516 | } |
5517 | }; |
5518 | } |
5519 | |
5520 | #define NEONMAP0(NameBase) \ |
5521 | { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } |
5522 | |
5523 | #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
5524 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
5525 | Intrinsic::LLVMIntrinsic, 0, TypeModifier } |
5526 | |
5527 | #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ |
5528 | { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ |
5529 | Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ |
5530 | TypeModifier } |
5531 | |
5532 | static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = { |
5533 | NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0), |
5534 | NEONMAP0(splat_lane_v), |
5535 | NEONMAP0(splat_laneq_v), |
5536 | NEONMAP0(splatq_lane_v), |
5537 | NEONMAP0(splatq_laneq_v), |
5538 | NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5539 | NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), |
5540 | NEONMAP1(vabs_v, arm_neon_vabs, 0), |
5541 | NEONMAP1(vabsq_v, arm_neon_vabs, 0), |
5542 | NEONMAP0(vadd_v), |
5543 | NEONMAP0(vaddhn_v), |
5544 | NEONMAP0(vaddq_v), |
5545 | NEONMAP1(vaesdq_v, arm_neon_aesd, 0), |
5546 | NEONMAP1(vaeseq_v, arm_neon_aese, 0), |
5547 | NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), |
5548 | NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), |
5549 | NEONMAP1(vbfdot_v, arm_neon_bfdot, 0), |
5550 | NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0), |
5551 | NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0), |
5552 | NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0), |
5553 | NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0), |
5554 | NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), |
5555 | NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), |
5556 | NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5557 | NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5558 | NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), |
5559 | NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), |
5560 | NEONMAP1(vcage_v, arm_neon_vacge, 0), |
5561 | NEONMAP1(vcageq_v, arm_neon_vacge, 0), |
5562 | NEONMAP1(vcagt_v, arm_neon_vacgt, 0), |
5563 | NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), |
5564 | NEONMAP1(vcale_v, arm_neon_vacge, 0), |
5565 | NEONMAP1(vcaleq_v, arm_neon_vacge, 0), |
5566 | NEONMAP1(vcalt_v, arm_neon_vacgt, 0), |
5567 | NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), |
5568 | NEONMAP0(vceqz_v), |
5569 | NEONMAP0(vceqzq_v), |
5570 | NEONMAP0(vcgez_v), |
5571 | NEONMAP0(vcgezq_v), |
5572 | NEONMAP0(vcgtz_v), |
5573 | NEONMAP0(vcgtzq_v), |
5574 | NEONMAP0(vclez_v), |
5575 | NEONMAP0(vclezq_v), |
5576 | NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), |
5577 | NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), |
5578 | NEONMAP0(vcltz_v), |
5579 | NEONMAP0(vcltzq_v), |
5580 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
5581 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
5582 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
5583 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
5584 | NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), |
5585 | NEONMAP0(vcvt_f16_v), |
5586 | NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), |
5587 | NEONMAP0(vcvt_f32_v), |
5588 | NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5589 | NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5590 | NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5591 | NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5592 | NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5593 | NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5594 | NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5595 | NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5596 | NEONMAP0(vcvt_s16_v), |
5597 | NEONMAP0(vcvt_s32_v), |
5598 | NEONMAP0(vcvt_s64_v), |
5599 | NEONMAP0(vcvt_u16_v), |
5600 | NEONMAP0(vcvt_u32_v), |
5601 | NEONMAP0(vcvt_u64_v), |
5602 | NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), |
5603 | NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), |
5604 | NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), |
5605 | NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), |
5606 | NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), |
5607 | NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), |
5608 | NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), |
5609 | NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), |
5610 | NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), |
5611 | NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), |
5612 | NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), |
5613 | NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), |
5614 | NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), |
5615 | NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), |
5616 | NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), |
5617 | NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), |
5618 | NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), |
5619 | NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), |
5620 | NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), |
5621 | NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), |
5622 | NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), |
5623 | NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), |
5624 | NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), |
5625 | NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), |
5626 | NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), |
5627 | NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), |
5628 | NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), |
5629 | NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), |
5630 | NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), |
5631 | NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), |
5632 | NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), |
5633 | NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), |
5634 | NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), |
5635 | NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), |
5636 | NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), |
5637 | NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), |
5638 | NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), |
5639 | NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), |
5640 | NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), |
5641 | NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), |
5642 | NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), |
5643 | NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), |
5644 | NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), |
5645 | NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), |
5646 | NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), |
5647 | NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), |
5648 | NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), |
5649 | NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), |
5650 | NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), |
5651 | NEONMAP0(vcvtq_f16_v), |
5652 | NEONMAP0(vcvtq_f32_v), |
5653 | NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5654 | NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), |
5655 | NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), |
5656 | NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), |
5657 | NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), |
5658 | NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), |
5659 | NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), |
5660 | NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), |
5661 | NEONMAP0(vcvtq_s16_v), |
5662 | NEONMAP0(vcvtq_s32_v), |
5663 | NEONMAP0(vcvtq_s64_v), |
5664 | NEONMAP0(vcvtq_u16_v), |
5665 | NEONMAP0(vcvtq_u32_v), |
5666 | NEONMAP0(vcvtq_u64_v), |
5667 | NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), |
5668 | NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), |
5669 | NEONMAP0(vext_v), |
5670 | NEONMAP0(vextq_v), |
5671 | NEONMAP0(vfma_v), |
5672 | NEONMAP0(vfmaq_v), |
5673 | NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5674 | NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), |
5675 | NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5676 | NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), |
5677 | NEONMAP0(vld1_dup_v), |
5678 | NEONMAP1(vld1_v, arm_neon_vld1, 0), |
5679 | NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), |
5680 | NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), |
5681 | NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), |
5682 | NEONMAP0(vld1q_dup_v), |
5683 | NEONMAP1(vld1q_v, arm_neon_vld1, 0), |
5684 | NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), |
5685 | NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), |
5686 | NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), |
5687 | NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), |
5688 | NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), |
5689 | NEONMAP1(vld2_v, arm_neon_vld2, 0), |
5690 | NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), |
5691 | NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), |
5692 | NEONMAP1(vld2q_v, arm_neon_vld2, 0), |
5693 | NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), |
5694 | NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), |
5695 | NEONMAP1(vld3_v, arm_neon_vld3, 0), |
5696 | NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), |
5697 | NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), |
5698 | NEONMAP1(vld3q_v, arm_neon_vld3, 0), |
5699 | NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), |
5700 | NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), |
5701 | NEONMAP1(vld4_v, arm_neon_vld4, 0), |
5702 | NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), |
5703 | NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), |
5704 | NEONMAP1(vld4q_v, arm_neon_vld4, 0), |
5705 | NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5706 | NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), |
5707 | NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), |
5708 | NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), |
5709 | NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5710 | NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), |
5711 | NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), |
5712 | NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), |
5713 | NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0), |
5714 | NEONMAP0(vmovl_v), |
5715 | NEONMAP0(vmovn_v), |
5716 | NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), |
5717 | NEONMAP0(vmull_v), |
5718 | NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), |
5719 | NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5720 | NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), |
5721 | NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), |
5722 | NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5723 | NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), |
5724 | NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), |
5725 | NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), |
5726 | NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), |
5727 | NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), |
5728 | NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), |
5729 | NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5730 | NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), |
5731 | NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), |
5732 | NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), |
5733 | NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), |
5734 | NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), |
5735 | NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), |
5736 | NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), |
5737 | NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), |
5738 | NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), |
5739 | NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), |
5740 | NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), |
5741 | NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), |
5742 | NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5743 | NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), |
5744 | NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5745 | NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5746 | NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), |
5747 | NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), |
5748 | NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), |
5749 | NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), |
5750 | NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5751 | NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), |
5752 | NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), |
5753 | NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5754 | NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), |
5755 | NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), |
5756 | NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), |
5757 | NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5758 | NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), |
5759 | NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), |
5760 | NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), |
5761 | NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), |
5762 | NEONMAP0(vrndi_v), |
5763 | NEONMAP0(vrndiq_v), |
5764 | NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), |
5765 | NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), |
5766 | NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), |
5767 | NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), |
5768 | NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), |
5769 | NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), |
5770 | NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), |
5771 | NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), |
5772 | NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), |
5773 | NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5774 | NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), |
5775 | NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5776 | NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), |
5777 | NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5778 | NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), |
5779 | NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), |
5780 | NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), |
5781 | NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), |
5782 | NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), |
5783 | NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), |
5784 | NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), |
5785 | NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), |
5786 | NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), |
5787 | NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), |
5788 | NEONMAP0(vshl_n_v), |
5789 | NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5790 | NEONMAP0(vshll_n_v), |
5791 | NEONMAP0(vshlq_n_v), |
5792 | NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), |
5793 | NEONMAP0(vshr_n_v), |
5794 | NEONMAP0(vshrn_n_v), |
5795 | NEONMAP0(vshrq_n_v), |
5796 | NEONMAP1(vst1_v, arm_neon_vst1, 0), |
5797 | NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), |
5798 | NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), |
5799 | NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), |
5800 | NEONMAP1(vst1q_v, arm_neon_vst1, 0), |
5801 | NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), |
5802 | NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), |
5803 | NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), |
5804 | NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), |
5805 | NEONMAP1(vst2_v, arm_neon_vst2, 0), |
5806 | NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), |
5807 | NEONMAP1(vst2q_v, arm_neon_vst2, 0), |
5808 | NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), |
5809 | NEONMAP1(vst3_v, arm_neon_vst3, 0), |
5810 | NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), |
5811 | NEONMAP1(vst3q_v, arm_neon_vst3, 0), |
5812 | NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), |
5813 | NEONMAP1(vst4_v, arm_neon_vst4, 0), |
5814 | NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), |
5815 | NEONMAP1(vst4q_v, arm_neon_vst4, 0), |
5816 | NEONMAP0(vsubhn_v), |
5817 | NEONMAP0(vtrn_v), |
5818 | NEONMAP0(vtrnq_v), |
5819 | NEONMAP0(vtst_v), |
5820 | NEONMAP0(vtstq_v), |
5821 | NEONMAP1(vusdot_v, arm_neon_usdot, 0), |
5822 | NEONMAP1(vusdotq_v, arm_neon_usdot, 0), |
5823 | NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0), |
5824 | NEONMAP0(vuzp_v), |
5825 | NEONMAP0(vuzpq_v), |
5826 | NEONMAP0(vzip_v), |
5827 | NEONMAP0(vzipq_v) |
5828 | }; |
5829 | |
5830 | static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { |
5831 | NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0), |
5832 | NEONMAP0(splat_lane_v), |
5833 | NEONMAP0(splat_laneq_v), |
5834 | NEONMAP0(splatq_lane_v), |
5835 | NEONMAP0(splatq_laneq_v), |
5836 | NEONMAP1(vabs_v, aarch64_neon_abs, 0), |
5837 | NEONMAP1(vabsq_v, aarch64_neon_abs, 0), |
5838 | NEONMAP0(vadd_v), |
5839 | NEONMAP0(vaddhn_v), |
5840 | NEONMAP0(vaddq_p128), |
5841 | NEONMAP0(vaddq_v), |
5842 | NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), |
5843 | NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), |
5844 | NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), |
5845 | NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), |
5846 | NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), |
5847 | NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0), |
5848 | NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0), |
5849 | NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0), |
5850 | NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0), |
5851 | NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0), |
5852 | NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
5853 | NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
5854 | NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), |
5855 | NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), |
5856 | NEONMAP1(vcage_v, aarch64_neon_facge, 0), |
5857 | NEONMAP1(vcageq_v, aarch64_neon_facge, 0), |
5858 | NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), |
5859 | NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), |
5860 | NEONMAP1(vcale_v, aarch64_neon_facge, 0), |
5861 | NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), |
5862 | NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), |
5863 | NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), |
5864 | NEONMAP0(vceqz_v), |
5865 | NEONMAP0(vceqzq_v), |
5866 | NEONMAP0(vcgez_v), |
5867 | NEONMAP0(vcgezq_v), |
5868 | NEONMAP0(vcgtz_v), |
5869 | NEONMAP0(vcgtzq_v), |
5870 | NEONMAP0(vclez_v), |
5871 | NEONMAP0(vclezq_v), |
5872 | NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), |
5873 | NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), |
5874 | NEONMAP0(vcltz_v), |
5875 | NEONMAP0(vcltzq_v), |
5876 | NEONMAP1(vclz_v, ctlz, Add1ArgType), |
5877 | NEONMAP1(vclzq_v, ctlz, Add1ArgType), |
5878 | NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
5879 | NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
5880 | NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
5881 | NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
5882 | NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), |
5883 | NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), |
5884 | NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), |
5885 | NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType), |
5886 | NEONMAP1(vcnt_v, ctpop, Add1ArgType), |
5887 | NEONMAP1(vcntq_v, ctpop, Add1ArgType), |
5888 | NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), |
5889 | NEONMAP0(vcvt_f16_v), |
5890 | NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), |
5891 | NEONMAP0(vcvt_f32_v), |
5892 | NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5893 | NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5894 | NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5895 | NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
5896 | NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
5897 | NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
5898 | NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
5899 | NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
5900 | NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
5901 | NEONMAP0(vcvtq_f16_v), |
5902 | NEONMAP0(vcvtq_f32_v), |
5903 | NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0), |
5904 | NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5905 | NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5906 | NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), |
5907 | NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), |
5908 | NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), |
5909 | NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), |
5910 | NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), |
5911 | NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), |
5912 | NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), |
5913 | NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), |
5914 | NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
5915 | NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), |
5916 | NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), |
5917 | NEONMAP0(vext_v), |
5918 | NEONMAP0(vextq_v), |
5919 | NEONMAP0(vfma_v), |
5920 | NEONMAP0(vfmaq_v), |
5921 | NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0), |
5922 | NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), |
5923 | NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), |
5924 | NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), |
5925 | NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), |
5926 | NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), |
5927 | NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), |
5928 | NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), |
5929 | NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
5930 | NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), |
5931 | NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
5932 | NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), |
5933 | NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), |
5934 | NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), |
5935 | NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), |
5936 | NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), |
5937 | NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), |
5938 | NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), |
5939 | NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0), |
5940 | NEONMAP0(vmovl_v), |
5941 | NEONMAP0(vmovn_v), |
5942 | NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), |
5943 | NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), |
5944 | NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), |
5945 | NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
5946 | NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), |
5947 | NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), |
5948 | NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), |
5949 | NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), |
5950 | NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
5951 | NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), |
5952 | NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), |
5953 | NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), |
5954 | NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), |
5955 | NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
5956 | NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), |
5957 | NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), |
5958 | NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), |
5959 | NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), |
5960 | NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), |
5961 | NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), |
5962 | NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), |
5963 | NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), |
5964 | NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), |
5965 | NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
5966 | NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
5967 | NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), |
5968 | NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), |
5969 | NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), |
5970 | NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), |
5971 | NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
5972 | NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), |
5973 | NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), |
5974 | NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
5975 | NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts), |
5976 | NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), |
5977 | NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), |
5978 | NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), |
5979 | NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
5980 | NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), |
5981 | NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), |
5982 | NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0), |
5983 | NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
5984 | NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), |
5985 | NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), |
5986 | NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), |
5987 | NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
5988 | NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), |
5989 | NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType), |
5990 | NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType), |
5991 | NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType), |
5992 | NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType), |
5993 | NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType), |
5994 | NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType), |
5995 | NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType), |
5996 | NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType), |
5997 | NEONMAP0(vrndi_v), |
5998 | NEONMAP0(vrndiq_v), |
5999 | NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
6000 | NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), |
6001 | NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
6002 | NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), |
6003 | NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
6004 | NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), |
6005 | NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), |
6006 | NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), |
6007 | NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), |
6008 | NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), |
6009 | NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), |
6010 | NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), |
6011 | NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), |
6012 | NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), |
6013 | NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), |
6014 | NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0), |
6015 | NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0), |
6016 | NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0), |
6017 | NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0), |
6018 | NEONMAP0(vshl_n_v), |
6019 | NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6020 | NEONMAP0(vshll_n_v), |
6021 | NEONMAP0(vshlq_n_v), |
6022 | NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), |
6023 | NEONMAP0(vshr_n_v), |
6024 | NEONMAP0(vshrn_n_v), |
6025 | NEONMAP0(vshrq_n_v), |
6026 | NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0), |
6027 | NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0), |
6028 | NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0), |
6029 | NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0), |
6030 | NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0), |
6031 | NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0), |
6032 | NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0), |
6033 | NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0), |
6034 | NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0), |
6035 | NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), |
6036 | NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), |
6037 | NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), |
6038 | NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), |
6039 | NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), |
6040 | NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), |
6041 | NEONMAP0(vsubhn_v), |
6042 | NEONMAP0(vtst_v), |
6043 | NEONMAP0(vtstq_v), |
6044 | NEONMAP1(vusdot_v, aarch64_neon_usdot, 0), |
6045 | NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0), |
6046 | NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0), |
6047 | NEONMAP1(vxarq_v, aarch64_crypto_xar, 0), |
6048 | }; |
6049 | |
6050 | static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { |
6051 | NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), |
6052 | NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), |
6053 | NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), |
6054 | NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6055 | NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6056 | NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), |
6057 | NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), |
6058 | NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6059 | NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6060 | NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6061 | NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6062 | NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), |
6063 | NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6064 | NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), |
6065 | NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6066 | NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6067 | NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6068 | NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6069 | NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6070 | NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6071 | NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), |
6072 | NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), |
6073 | NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6074 | NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), |
6075 | NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6076 | NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6077 | NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6078 | NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6079 | NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6080 | NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6081 | NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6082 | NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6083 | NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6084 | NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6085 | NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), |
6086 | NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6087 | NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6088 | NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6089 | NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6090 | NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6091 | NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6092 | NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6093 | NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6094 | NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6095 | NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6096 | NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6097 | NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6098 | NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6099 | NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6100 | NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6101 | NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6102 | NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6103 | NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6104 | NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), |
6105 | NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6106 | NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6107 | NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6108 | NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6109 | NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6110 | NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6111 | NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6112 | NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6113 | NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), |
6114 | NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), |
6115 | NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6116 | NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6117 | NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6118 | NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6119 | NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6120 | NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6121 | NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6122 | NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6123 | NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), |
6124 | NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), |
6125 | NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), |
6126 | NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), |
6127 | NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), |
6128 | NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6129 | NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), |
6130 | NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6131 | NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), |
6132 | NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6133 | NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), |
6134 | NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6135 | NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), |
6136 | NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6137 | NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), |
6138 | NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6139 | NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), |
6140 | NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), |
6141 | NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), |
6142 | NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6143 | NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6144 | NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), |
6145 | NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), |
6146 | NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), |
6147 | NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), |
6148 | NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), |
6149 | NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), |
6150 | NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), |
6151 | NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), |
6152 | NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), |
6153 | NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), |
6154 | NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), |
6155 | NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), |
6156 | NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6157 | NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6158 | NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), |
6159 | NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), |
6160 | NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), |
6161 | NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6162 | NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), |
6163 | NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6164 | NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), |
6165 | NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), |
6166 | NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), |
6167 | NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), |
6168 | NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), |
6169 | NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6170 | NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6171 | NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), |
6172 | NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), |
6173 | NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), |
6174 | NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), |
6175 | NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), |
6176 | NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), |
6177 | NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), |
6178 | NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), |
6179 | NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6180 | NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6181 | NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), |
6182 | NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), |
6183 | NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), |
6184 | NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6185 | NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), |
6186 | NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6187 | NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6188 | NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6189 | NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6190 | NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), |
6191 | NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), |
6192 | NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6193 | NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6194 | NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), |
6195 | NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), |
6196 | NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), |
6197 | NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), |
6198 | NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), |
6199 | NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), |
6200 | NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6201 | NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), |
6202 | NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), |
6203 | NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), |
6204 | NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), |
6205 | NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6206 | NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6207 | NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), |
6208 | NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), |
6209 | NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), |
6210 | NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6211 | NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), |
6212 | NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6213 | NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6214 | NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), |
6215 | NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), |
6216 | NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), |
6217 | NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), |
6218 | NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), |
6219 | NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), |
6220 | NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), |
6221 | NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), |
6222 | NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), |
6223 | NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), |
6224 | NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), |
6225 | NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), |
6226 | NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), |
6227 | NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), |
6228 | NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), |
6229 | NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), |
6230 | NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), |
6231 | NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), |
6232 | NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), |
6233 | NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), |
6234 | NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), |
6235 | NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), |
6236 | NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), |
6237 | NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), |
6238 | NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6239 | NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), |
6240 | NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), |
6241 | NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), |
6242 | NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), |
6243 | NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), |
6244 | NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6245 | NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), |
6246 | NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), |
6247 | NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), |
6248 | |
6249 | NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), |
6250 | NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6251 | NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), |
6252 | NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6253 | NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), |
6254 | NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6255 | NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), |
6256 | NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6257 | NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), |
6258 | NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6259 | NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), |
6260 | NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6261 | NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), |
6262 | NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6263 | NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), |
6264 | NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6265 | NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), |
6266 | NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6267 | NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), |
6268 | NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6269 | NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), |
6270 | NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6271 | NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), |
6272 | NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6273 | NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), |
6274 | NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6275 | NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), |
6276 | NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6277 | NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), |
6278 | NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), |
6279 | NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), |
6280 | NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), |
6281 | NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), |
6282 | NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), |
6283 | }; |
6284 | |
6285 | #undef NEONMAP0 |
6286 | #undef NEONMAP1 |
6287 | #undef NEONMAP2 |
6288 | |
6289 | #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ |
6290 | { \ |
6291 | #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ |
6292 | TypeModifier \ |
6293 | } |
6294 | |
6295 | #define SVEMAP2(NameBase, TypeModifier) \ |
6296 | { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } |
6297 | static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { |
6298 | #define GET_SVE_LLVM_INTRINSIC_MAP |
6299 | #include "clang/Basic/arm_sve_builtin_cg.inc" |
6300 | #undef GET_SVE_LLVM_INTRINSIC_MAP |
6301 | }; |
6302 | |
6303 | #undef SVEMAP1 |
6304 | #undef SVEMAP2 |
6305 | |
6306 | static bool NEONSIMDIntrinsicsProvenSorted = false; |
6307 | |
6308 | static bool AArch64SIMDIntrinsicsProvenSorted = false; |
6309 | static bool AArch64SISDIntrinsicsProvenSorted = false; |
6310 | static bool AArch64SVEIntrinsicsProvenSorted = false; |
6311 | |
6312 | static const ARMVectorIntrinsicInfo * |
6313 | findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap, |
6314 | unsigned BuiltinID, bool &MapProvenSorted) { |
6315 | |
6316 | #ifndef NDEBUG |
6317 | if (!MapProvenSorted) { |
6318 | assert(llvm::is_sorted(IntrinsicMap)); |
6319 | MapProvenSorted = true; |
6320 | } |
6321 | #endif |
6322 | |
6323 | const ARMVectorIntrinsicInfo *Builtin = |
6324 | llvm::lower_bound(IntrinsicMap, BuiltinID); |
6325 | |
6326 | if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) |
6327 | return Builtin; |
6328 | |
6329 | return nullptr; |
6330 | } |
6331 | |
6332 | Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, |
6333 | unsigned Modifier, |
6334 | llvm::Type *ArgType, |
6335 | const CallExpr *E) { |
6336 | int VectorSize = 0; |
6337 | if (Modifier & Use64BitVectors) |
6338 | VectorSize = 64; |
6339 | else if (Modifier & Use128BitVectors) |
6340 | VectorSize = 128; |
6341 | |
6342 | |
6343 | SmallVector<llvm::Type *, 3> Tys; |
6344 | if (Modifier & AddRetType) { |
6345 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
6346 | if (Modifier & VectorizeRetType) |
6347 | Ty = llvm::FixedVectorType::get( |
6348 | Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); |
6349 | |
6350 | Tys.push_back(Ty); |
6351 | } |
6352 | |
6353 | |
6354 | if (Modifier & VectorizeArgTypes) { |
6355 | int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; |
6356 | ArgType = llvm::FixedVectorType::get(ArgType, Elts); |
6357 | } |
6358 | |
6359 | if (Modifier & (Add1ArgType | Add2ArgTypes)) |
6360 | Tys.push_back(ArgType); |
6361 | |
6362 | if (Modifier & Add2ArgTypes) |
6363 | Tys.push_back(ArgType); |
6364 | |
6365 | if (Modifier & InventFloatType) |
6366 | Tys.push_back(FloatTy); |
6367 | |
6368 | return CGM.getIntrinsic(IntrinsicID, Tys); |
6369 | } |
6370 | |
6371 | static Value *EmitCommonNeonSISDBuiltinExpr( |
6372 | CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo, |
6373 | SmallVectorImpl<Value *> &Ops, const CallExpr *E) { |
6374 | unsigned BuiltinID = SISDInfo.BuiltinID; |
6375 | unsigned int Int = SISDInfo.LLVMIntrinsic; |
6376 | unsigned Modifier = SISDInfo.TypeModifier; |
6377 | const char *s = SISDInfo.NameHint; |
6378 | |
6379 | switch (BuiltinID) { |
6380 | case NEON::BI__builtin_neon_vcled_s64: |
6381 | case NEON::BI__builtin_neon_vcled_u64: |
6382 | case NEON::BI__builtin_neon_vcles_f32: |
6383 | case NEON::BI__builtin_neon_vcled_f64: |
6384 | case NEON::BI__builtin_neon_vcltd_s64: |
6385 | case NEON::BI__builtin_neon_vcltd_u64: |
6386 | case NEON::BI__builtin_neon_vclts_f32: |
6387 | case NEON::BI__builtin_neon_vcltd_f64: |
6388 | case NEON::BI__builtin_neon_vcales_f32: |
6389 | case NEON::BI__builtin_neon_vcaled_f64: |
6390 | case NEON::BI__builtin_neon_vcalts_f32: |
6391 | case NEON::BI__builtin_neon_vcaltd_f64: |
6392 | |
6393 | |
6394 | |
6395 | std::swap(Ops[0], Ops[1]); |
6396 | break; |
6397 | } |
6398 | |
6399 | assert(Int && "Generic code assumes a valid intrinsic"); |
6400 | |
6401 | |
6402 | const Expr *Arg = E->getArg(0); |
6403 | llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); |
6404 | Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E); |
6405 | |
6406 | int j = 0; |
6407 | ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0); |
6408 | for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); |
6409 | ai != ae; ++ai, ++j) { |
6410 | llvm::Type *ArgTy = ai->getType(); |
6411 | if (Ops[j]->getType()->getPrimitiveSizeInBits() == |
6412 | ArgTy->getPrimitiveSizeInBits()) |
6413 | continue; |
6414 | |
6415 | assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); |
6416 | |
6417 | |
6418 | Ops[j] = CGF.Builder.CreateTruncOrBitCast( |
6419 | Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType()); |
6420 | Ops[j] = |
6421 | CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0); |
6422 | } |
6423 | |
6424 | Value *Result = CGF.EmitNeonCall(F, Ops, s); |
6425 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
6426 | if (ResultType->getPrimitiveSizeInBits().getFixedSize() < |
6427 | Result->getType()->getPrimitiveSizeInBits().getFixedSize()) |
6428 | return CGF.Builder.CreateExtractElement(Result, C0); |
6429 | |
6430 | return CGF.Builder.CreateBitCast(Result, ResultType, s); |
6431 | } |
6432 | |
6433 | Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( |
6434 | unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic, |
6435 | const char *NameHint, unsigned Modifier, const CallExpr *E, |
6436 | SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1, |
6437 | llvm::Triple::ArchType Arch) { |
6438 | |
6439 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
6440 | Optional<llvm::APSInt> NeonTypeConst = |
6441 | Arg->getIntegerConstantExpr(getContext()); |
6442 | if (!NeonTypeConst) |
6443 | return nullptr; |
6444 | |
6445 | |
6446 | NeonTypeFlags Type(NeonTypeConst->getZExtValue()); |
6447 | bool Usgn = Type.isUnsigned(); |
6448 | bool Quad = Type.isQuad(); |
6449 | const bool HasLegalHalfType = getTarget().hasLegalHalfType(); |
6450 | const bool AllowBFloatArgsAndRet = |
6451 | getTargetHooks().getABIInfo().allowBFloatArgsAndRet(); |
6452 | |
6453 | llvm::FixedVectorType *VTy = |
6454 | GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet); |
6455 | llvm::Type *Ty = VTy; |
6456 | if (!Ty) |
6457 | return nullptr; |
6458 | |
6459 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
6460 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
6461 | }; |
6462 | |
6463 | unsigned Int = LLVMIntrinsic; |
6464 | if ((Modifier & UnsignedAlts) && !Usgn) |
6465 | Int = AltLLVMIntrinsic; |
6466 | |
6467 | switch (BuiltinID) { |
6468 | default: break; |
6469 | case NEON::BI__builtin_neon_splat_lane_v: |
6470 | case NEON::BI__builtin_neon_splat_laneq_v: |
6471 | case NEON::BI__builtin_neon_splatq_lane_v: |
6472 | case NEON::BI__builtin_neon_splatq_laneq_v: { |
6473 | auto NumElements = VTy->getElementCount(); |
6474 | if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v) |
6475 | NumElements = NumElements * 2; |
6476 | if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v) |
6477 | NumElements = NumElements.divideCoefficientBy(2); |
6478 | |
6479 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6480 | return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements); |
6481 | } |
6482 | case NEON::BI__builtin_neon_vpadd_v: |
6483 | case NEON::BI__builtin_neon_vpaddq_v: |
6484 | |
6485 | if (VTy->getElementType()->isFloatingPointTy() && |
6486 | Int == Intrinsic::aarch64_neon_addp) |
6487 | Int = Intrinsic::aarch64_neon_faddp; |
6488 | break; |
6489 | case NEON::BI__builtin_neon_vabs_v: |
6490 | case NEON::BI__builtin_neon_vabsq_v: |
6491 | if (VTy->getElementType()->isFloatingPointTy()) |
6492 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs"); |
6493 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs"); |
6494 | case NEON::BI__builtin_neon_vadd_v: |
6495 | case NEON::BI__builtin_neon_vaddq_v: { |
6496 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8); |
6497 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
6498 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
6499 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
6500 | return Builder.CreateBitCast(Ops[0], Ty); |
6501 | } |
6502 | case NEON::BI__builtin_neon_vaddhn_v: { |
6503 | llvm::FixedVectorType *SrcTy = |
6504 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6505 | |
6506 | |
6507 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6508 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
6509 | Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn"); |
6510 | |
6511 | |
6512 | Constant *ShiftAmt = |
6513 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
6514 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn"); |
6515 | |
6516 | |
6517 | return Builder.CreateTrunc(Ops[0], VTy, "vaddhn"); |
6518 | } |
6519 | case NEON::BI__builtin_neon_vcale_v: |
6520 | case NEON::BI__builtin_neon_vcaleq_v: |
6521 | case NEON::BI__builtin_neon_vcalt_v: |
6522 | case NEON::BI__builtin_neon_vcaltq_v: |
6523 | std::swap(Ops[0], Ops[1]); |
6524 | LLVM_FALLTHROUGH; |
6525 | case NEON::BI__builtin_neon_vcage_v: |
6526 | case NEON::BI__builtin_neon_vcageq_v: |
6527 | case NEON::BI__builtin_neon_vcagt_v: |
6528 | case NEON::BI__builtin_neon_vcagtq_v: { |
6529 | llvm::Type *Ty; |
6530 | switch (VTy->getScalarSizeInBits()) { |
6531 | default: llvm_unreachable("unexpected type"); |
6532 | case 32: |
6533 | Ty = FloatTy; |
6534 | break; |
6535 | case 64: |
6536 | Ty = DoubleTy; |
6537 | break; |
6538 | case 16: |
6539 | Ty = HalfTy; |
6540 | break; |
6541 | } |
6542 | auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements()); |
6543 | llvm::Type *Tys[] = { VTy, VecFlt }; |
6544 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6545 | return EmitNeonCall(F, Ops, NameHint); |
6546 | } |
6547 | case NEON::BI__builtin_neon_vceqz_v: |
6548 | case NEON::BI__builtin_neon_vceqzq_v: |
6549 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ, |
6550 | ICmpInst::ICMP_EQ, "vceqz"); |
6551 | case NEON::BI__builtin_neon_vcgez_v: |
6552 | case NEON::BI__builtin_neon_vcgezq_v: |
6553 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE, |
6554 | ICmpInst::ICMP_SGE, "vcgez"); |
6555 | case NEON::BI__builtin_neon_vclez_v: |
6556 | case NEON::BI__builtin_neon_vclezq_v: |
6557 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE, |
6558 | ICmpInst::ICMP_SLE, "vclez"); |
6559 | case NEON::BI__builtin_neon_vcgtz_v: |
6560 | case NEON::BI__builtin_neon_vcgtzq_v: |
6561 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT, |
6562 | ICmpInst::ICMP_SGT, "vcgtz"); |
6563 | case NEON::BI__builtin_neon_vcltz_v: |
6564 | case NEON::BI__builtin_neon_vcltzq_v: |
6565 | return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT, |
6566 | ICmpInst::ICMP_SLT, "vcltz"); |
6567 | case NEON::BI__builtin_neon_vclz_v: |
6568 | case NEON::BI__builtin_neon_vclzq_v: |
6569 | |
6570 | |
6571 | Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef())); |
6572 | break; |
6573 | case NEON::BI__builtin_neon_vcvt_f32_v: |
6574 | case NEON::BI__builtin_neon_vcvtq_f32_v: |
6575 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6576 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad), |
6577 | HasLegalHalfType); |
6578 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6579 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6580 | case NEON::BI__builtin_neon_vcvt_f16_v: |
6581 | case NEON::BI__builtin_neon_vcvtq_f16_v: |
6582 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6583 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad), |
6584 | HasLegalHalfType); |
6585 | return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
6586 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
6587 | case NEON::BI__builtin_neon_vcvt_n_f16_v: |
6588 | case NEON::BI__builtin_neon_vcvt_n_f32_v: |
6589 | case NEON::BI__builtin_neon_vcvt_n_f64_v: |
6590 | case NEON::BI__builtin_neon_vcvtq_n_f16_v: |
6591 | case NEON::BI__builtin_neon_vcvtq_n_f32_v: |
6592 | case NEON::BI__builtin_neon_vcvtq_n_f64_v: { |
6593 | llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; |
6594 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
6595 | Function *F = CGM.getIntrinsic(Int, Tys); |
6596 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6597 | } |
6598 | case NEON::BI__builtin_neon_vcvt_n_s16_v: |
6599 | case NEON::BI__builtin_neon_vcvt_n_s32_v: |
6600 | case NEON::BI__builtin_neon_vcvt_n_u16_v: |
6601 | case NEON::BI__builtin_neon_vcvt_n_u32_v: |
6602 | case NEON::BI__builtin_neon_vcvt_n_s64_v: |
6603 | case NEON::BI__builtin_neon_vcvt_n_u64_v: |
6604 | case NEON::BI__builtin_neon_vcvtq_n_s16_v: |
6605 | case NEON::BI__builtin_neon_vcvtq_n_s32_v: |
6606 | case NEON::BI__builtin_neon_vcvtq_n_u16_v: |
6607 | case NEON::BI__builtin_neon_vcvtq_n_u32_v: |
6608 | case NEON::BI__builtin_neon_vcvtq_n_s64_v: |
6609 | case NEON::BI__builtin_neon_vcvtq_n_u64_v: { |
6610 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6611 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6612 | return EmitNeonCall(F, Ops, "vcvt_n"); |
6613 | } |
6614 | case NEON::BI__builtin_neon_vcvt_s32_v: |
6615 | case NEON::BI__builtin_neon_vcvt_u32_v: |
6616 | case NEON::BI__builtin_neon_vcvt_s64_v: |
6617 | case NEON::BI__builtin_neon_vcvt_u64_v: |
6618 | case NEON::BI__builtin_neon_vcvt_s16_v: |
6619 | case NEON::BI__builtin_neon_vcvt_u16_v: |
6620 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
6621 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
6622 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
6623 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
6624 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
6625 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
6626 | Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type)); |
6627 | return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") |
6628 | : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); |
6629 | } |
6630 | case NEON::BI__builtin_neon_vcvta_s16_v: |
6631 | case NEON::BI__builtin_neon_vcvta_s32_v: |
6632 | case NEON::BI__builtin_neon_vcvta_s64_v: |
6633 | case NEON::BI__builtin_neon_vcvta_u16_v: |
6634 | case NEON::BI__builtin_neon_vcvta_u32_v: |
6635 | case NEON::BI__builtin_neon_vcvta_u64_v: |
6636 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
6637 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
6638 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
6639 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
6640 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
6641 | case NEON::BI__builtin_neon_vcvtaq_u64_v: |
6642 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
6643 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
6644 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
6645 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
6646 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
6647 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
6648 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
6649 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
6650 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
6651 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
6652 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
6653 | case NEON::BI__builtin_neon_vcvtnq_u64_v: |
6654 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
6655 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
6656 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
6657 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
6658 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
6659 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
6660 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
6661 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
6662 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
6663 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
6664 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
6665 | case NEON::BI__builtin_neon_vcvtpq_u64_v: |
6666 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
6667 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
6668 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
6669 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
6670 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
6671 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
6672 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
6673 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
6674 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
6675 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
6676 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
6677 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
6678 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
6679 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6680 | } |
6681 | case NEON::BI__builtin_neon_vcvtx_f32_v: { |
6682 | llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty}; |
6683 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); |
6684 | |
6685 | } |
6686 | case NEON::BI__builtin_neon_vext_v: |
6687 | case NEON::BI__builtin_neon_vextq_v: { |
6688 | int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); |
6689 | SmallVector<int, 16> Indices; |
6690 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
6691 | Indices.push_back(i+CV); |
6692 | |
6693 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6694 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6695 | return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext"); |
6696 | } |
6697 | case NEON::BI__builtin_neon_vfma_v: |
6698 | case NEON::BI__builtin_neon_vfmaq_v: { |
6699 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6700 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6701 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6702 | |
6703 | |
6704 | return emitCallMaybeConstrainedFPBuiltin( |
6705 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
6706 | {Ops[1], Ops[2], Ops[0]}); |
6707 | } |
6708 | case NEON::BI__builtin_neon_vld1_v: |
6709 | case NEON::BI__builtin_neon_vld1q_v: { |
6710 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6711 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
6712 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1"); |
6713 | } |
6714 | case NEON::BI__builtin_neon_vld1_x2_v: |
6715 | case NEON::BI__builtin_neon_vld1q_x2_v: |
6716 | case NEON::BI__builtin_neon_vld1_x3_v: |
6717 | case NEON::BI__builtin_neon_vld1q_x3_v: |
6718 | case NEON::BI__builtin_neon_vld1_x4_v: |
6719 | case NEON::BI__builtin_neon_vld1q_x4_v: { |
6720 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
6721 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
6722 | llvm::Type *Tys[2] = { VTy, PTy }; |
6723 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6724 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); |
6725 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6726 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6727 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6728 | } |
6729 | case NEON::BI__builtin_neon_vld2_v: |
6730 | case NEON::BI__builtin_neon_vld2q_v: |
6731 | case NEON::BI__builtin_neon_vld3_v: |
6732 | case NEON::BI__builtin_neon_vld3q_v: |
6733 | case NEON::BI__builtin_neon_vld4_v: |
6734 | case NEON::BI__builtin_neon_vld4q_v: |
6735 | case NEON::BI__builtin_neon_vld2_dup_v: |
6736 | case NEON::BI__builtin_neon_vld2q_dup_v: |
6737 | case NEON::BI__builtin_neon_vld3_dup_v: |
6738 | case NEON::BI__builtin_neon_vld3q_dup_v: |
6739 | case NEON::BI__builtin_neon_vld4_dup_v: |
6740 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
6741 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6742 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6743 | Value *Align = getAlignmentValue32(PtrOp1); |
6744 | Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); |
6745 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6746 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6747 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6748 | } |
6749 | case NEON::BI__builtin_neon_vld1_dup_v: |
6750 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
6751 | Value *V = UndefValue::get(Ty); |
6752 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
6753 | PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty); |
6754 | LoadInst *Ld = Builder.CreateLoad(PtrOp0); |
6755 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
6756 | Ops[0] = Builder.CreateInsertElement(V, Ld, CI); |
6757 | return EmitNeonSplat(Ops[0], CI); |
6758 | } |
6759 | case NEON::BI__builtin_neon_vld2_lane_v: |
6760 | case NEON::BI__builtin_neon_vld2q_lane_v: |
6761 | case NEON::BI__builtin_neon_vld3_lane_v: |
6762 | case NEON::BI__builtin_neon_vld3q_lane_v: |
6763 | case NEON::BI__builtin_neon_vld4_lane_v: |
6764 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
6765 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
6766 | Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); |
6767 | for (unsigned I = 2; I < Ops.size() - 1; ++I) |
6768 | Ops[I] = Builder.CreateBitCast(Ops[I], Ty); |
6769 | Ops.push_back(getAlignmentValue32(PtrOp1)); |
6770 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint); |
6771 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
6772 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
6773 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
6774 | } |
6775 | case NEON::BI__builtin_neon_vmovl_v: { |
6776 | llvm::FixedVectorType *DTy = |
6777 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
6778 | Ops[0] = Builder.CreateBitCast(Ops[0], DTy); |
6779 | if (Usgn) |
6780 | return Builder.CreateZExt(Ops[0], Ty, "vmovl"); |
6781 | return Builder.CreateSExt(Ops[0], Ty, "vmovl"); |
6782 | } |
6783 | case NEON::BI__builtin_neon_vmovn_v: { |
6784 | llvm::FixedVectorType *QTy = |
6785 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6786 | Ops[0] = Builder.CreateBitCast(Ops[0], QTy); |
6787 | return Builder.CreateTrunc(Ops[0], Ty, "vmovn"); |
6788 | } |
6789 | case NEON::BI__builtin_neon_vmull_v: |
6790 | |
6791 | |
6792 | |
6793 | |
6794 | |
6795 | Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; |
6796 | Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int; |
6797 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
6798 | case NEON::BI__builtin_neon_vpadal_v: |
6799 | case NEON::BI__builtin_neon_vpadalq_v: { |
6800 | |
6801 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
6802 | llvm::Type *EltTy = |
6803 | llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
6804 | auto *NarrowTy = |
6805 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
6806 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
6807 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6808 | } |
6809 | case NEON::BI__builtin_neon_vpaddl_v: |
6810 | case NEON::BI__builtin_neon_vpaddlq_v: { |
6811 | |
6812 | unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); |
6813 | llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); |
6814 | auto *NarrowTy = |
6815 | llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); |
6816 | llvm::Type *Tys[2] = { Ty, NarrowTy }; |
6817 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); |
6818 | } |
6819 | case NEON::BI__builtin_neon_vqdmlal_v: |
6820 | case NEON::BI__builtin_neon_vqdmlsl_v: { |
6821 | SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end()); |
6822 | Ops[1] = |
6823 | EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal"); |
6824 | Ops.resize(2); |
6825 | return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint); |
6826 | } |
6827 | case NEON::BI__builtin_neon_vqdmulhq_lane_v: |
6828 | case NEON::BI__builtin_neon_vqdmulh_lane_v: |
6829 | case NEON::BI__builtin_neon_vqrdmulhq_lane_v: |
6830 | case NEON::BI__builtin_neon_vqrdmulh_lane_v: { |
6831 | auto *RTy = cast<llvm::FixedVectorType>(Ty); |
6832 | if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || |
6833 | BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v) |
6834 | RTy = llvm::FixedVectorType::get(RTy->getElementType(), |
6835 | RTy->getNumElements() * 2); |
6836 | llvm::Type *Tys[2] = { |
6837 | RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
6838 | false))}; |
6839 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6840 | } |
6841 | case NEON::BI__builtin_neon_vqdmulhq_laneq_v: |
6842 | case NEON::BI__builtin_neon_vqdmulh_laneq_v: |
6843 | case NEON::BI__builtin_neon_vqrdmulhq_laneq_v: |
6844 | case NEON::BI__builtin_neon_vqrdmulh_laneq_v: { |
6845 | llvm::Type *Tys[2] = { |
6846 | Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false, |
6847 | true))}; |
6848 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); |
6849 | } |
6850 | case NEON::BI__builtin_neon_vqshl_n_v: |
6851 | case NEON::BI__builtin_neon_vqshlq_n_v: |
6852 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", |
6853 | 1, false); |
6854 | case NEON::BI__builtin_neon_vqshlu_n_v: |
6855 | case NEON::BI__builtin_neon_vqshluq_n_v: |
6856 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", |
6857 | 1, false); |
6858 | case NEON::BI__builtin_neon_vrecpe_v: |
6859 | case NEON::BI__builtin_neon_vrecpeq_v: |
6860 | case NEON::BI__builtin_neon_vrsqrte_v: |
6861 | case NEON::BI__builtin_neon_vrsqrteq_v: |
6862 | Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; |
6863 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
6864 | case NEON::BI__builtin_neon_vrndi_v: |
6865 | case NEON::BI__builtin_neon_vrndiq_v: |
6866 | Int = Builder.getIsFPConstrained() |
6867 | ? Intrinsic::experimental_constrained_nearbyint |
6868 | : Intrinsic::nearbyint; |
6869 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); |
6870 | case NEON::BI__builtin_neon_vrshr_n_v: |
6871 | case NEON::BI__builtin_neon_vrshrq_n_v: |
6872 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", |
6873 | 1, true); |
6874 | case NEON::BI__builtin_neon_vsha512hq_v: |
6875 | case NEON::BI__builtin_neon_vsha512h2q_v: |
6876 | case NEON::BI__builtin_neon_vsha512su0q_v: |
6877 | case NEON::BI__builtin_neon_vsha512su1q_v: { |
6878 | Function *F = CGM.getIntrinsic(Int); |
6879 | return EmitNeonCall(F, Ops, ""); |
6880 | } |
6881 | case NEON::BI__builtin_neon_vshl_n_v: |
6882 | case NEON::BI__builtin_neon_vshlq_n_v: |
6883 | Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); |
6884 | return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], |
6885 | "vshl_n"); |
6886 | case NEON::BI__builtin_neon_vshll_n_v: { |
6887 | llvm::FixedVectorType *SrcTy = |
6888 | llvm::FixedVectorType::getTruncatedElementVectorType(VTy); |
6889 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6890 | if (Usgn) |
6891 | Ops[0] = Builder.CreateZExt(Ops[0], VTy); |
6892 | else |
6893 | Ops[0] = Builder.CreateSExt(Ops[0], VTy); |
6894 | Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false); |
6895 | return Builder.CreateShl(Ops[0], Ops[1], "vshll_n"); |
6896 | } |
6897 | case NEON::BI__builtin_neon_vshrn_n_v: { |
6898 | llvm::FixedVectorType *SrcTy = |
6899 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6900 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6901 | Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); |
6902 | if (Usgn) |
6903 | Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]); |
6904 | else |
6905 | Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]); |
6906 | return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n"); |
6907 | } |
6908 | case NEON::BI__builtin_neon_vshr_n_v: |
6909 | case NEON::BI__builtin_neon_vshrq_n_v: |
6910 | return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n"); |
6911 | case NEON::BI__builtin_neon_vst1_v: |
6912 | case NEON::BI__builtin_neon_vst1q_v: |
6913 | case NEON::BI__builtin_neon_vst2_v: |
6914 | case NEON::BI__builtin_neon_vst2q_v: |
6915 | case NEON::BI__builtin_neon_vst3_v: |
6916 | case NEON::BI__builtin_neon_vst3q_v: |
6917 | case NEON::BI__builtin_neon_vst4_v: |
6918 | case NEON::BI__builtin_neon_vst4q_v: |
6919 | case NEON::BI__builtin_neon_vst2_lane_v: |
6920 | case NEON::BI__builtin_neon_vst2q_lane_v: |
6921 | case NEON::BI__builtin_neon_vst3_lane_v: |
6922 | case NEON::BI__builtin_neon_vst3q_lane_v: |
6923 | case NEON::BI__builtin_neon_vst4_lane_v: |
6924 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
6925 | llvm::Type *Tys[] = {Int8PtrTy, Ty}; |
6926 | Ops.push_back(getAlignmentValue32(PtrOp0)); |
6927 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, ""); |
6928 | } |
6929 | case NEON::BI__builtin_neon_vsm3partw1q_v: |
6930 | case NEON::BI__builtin_neon_vsm3partw2q_v: |
6931 | case NEON::BI__builtin_neon_vsm3ss1q_v: |
6932 | case NEON::BI__builtin_neon_vsm4ekeyq_v: |
6933 | case NEON::BI__builtin_neon_vsm4eq_v: { |
6934 | Function *F = CGM.getIntrinsic(Int); |
6935 | return EmitNeonCall(F, Ops, ""); |
6936 | } |
6937 | case NEON::BI__builtin_neon_vsm3tt1aq_v: |
6938 | case NEON::BI__builtin_neon_vsm3tt1bq_v: |
6939 | case NEON::BI__builtin_neon_vsm3tt2aq_v: |
6940 | case NEON::BI__builtin_neon_vsm3tt2bq_v: { |
6941 | Function *F = CGM.getIntrinsic(Int); |
6942 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
6943 | return EmitNeonCall(F, Ops, ""); |
6944 | } |
6945 | case NEON::BI__builtin_neon_vst1_x2_v: |
6946 | case NEON::BI__builtin_neon_vst1q_x2_v: |
6947 | case NEON::BI__builtin_neon_vst1_x3_v: |
6948 | case NEON::BI__builtin_neon_vst1q_x3_v: |
6949 | case NEON::BI__builtin_neon_vst1_x4_v: |
6950 | case NEON::BI__builtin_neon_vst1q_x4_v: { |
6951 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
6952 | |
6953 | |
6954 | if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be || |
6955 | Arch == llvm::Triple::aarch64_32) { |
6956 | llvm::Type *Tys[2] = { VTy, PTy }; |
6957 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
6958 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
6959 | } |
6960 | llvm::Type *Tys[2] = { PTy, VTy }; |
6961 | return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); |
6962 | } |
6963 | case NEON::BI__builtin_neon_vsubhn_v: { |
6964 | llvm::FixedVectorType *SrcTy = |
6965 | llvm::FixedVectorType::getExtendedElementVectorType(VTy); |
6966 | |
6967 | |
6968 | Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); |
6969 | Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy); |
6970 | Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn"); |
6971 | |
6972 | |
6973 | Constant *ShiftAmt = |
6974 | ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); |
6975 | Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn"); |
6976 | |
6977 | |
6978 | return Builder.CreateTrunc(Ops[0], VTy, "vsubhn"); |
6979 | } |
6980 | case NEON::BI__builtin_neon_vtrn_v: |
6981 | case NEON::BI__builtin_neon_vtrnq_v: { |
6982 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
6983 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
6984 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
6985 | Value *SV = nullptr; |
6986 | |
6987 | for (unsigned vi = 0; vi != 2; ++vi) { |
6988 | SmallVector<int, 16> Indices; |
6989 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
6990 | Indices.push_back(i+vi); |
6991 | Indices.push_back(i+e+vi); |
6992 | } |
6993 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
6994 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
6995 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
6996 | } |
6997 | return SV; |
6998 | } |
6999 | case NEON::BI__builtin_neon_vtst_v: |
7000 | case NEON::BI__builtin_neon_vtstq_v: { |
7001 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7002 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7003 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
7004 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
7005 | ConstantAggregateZero::get(Ty)); |
7006 | return Builder.CreateSExt(Ops[0], Ty, "vtst"); |
7007 | } |
7008 | case NEON::BI__builtin_neon_vuzp_v: |
7009 | case NEON::BI__builtin_neon_vuzpq_v: { |
7010 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7011 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7012 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7013 | Value *SV = nullptr; |
7014 | |
7015 | for (unsigned vi = 0; vi != 2; ++vi) { |
7016 | SmallVector<int, 16> Indices; |
7017 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
7018 | Indices.push_back(2*i+vi); |
7019 | |
7020 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7021 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
7022 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7023 | } |
7024 | return SV; |
7025 | } |
7026 | case NEON::BI__builtin_neon_vxarq_v: { |
7027 | Function *F = CGM.getIntrinsic(Int); |
7028 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
7029 | return EmitNeonCall(F, Ops, ""); |
7030 | } |
7031 | case NEON::BI__builtin_neon_vzip_v: |
7032 | case NEON::BI__builtin_neon_vzipq_v: { |
7033 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
7034 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7035 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
7036 | Value *SV = nullptr; |
7037 | |
7038 | for (unsigned vi = 0; vi != 2; ++vi) { |
7039 | SmallVector<int, 16> Indices; |
7040 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
7041 | Indices.push_back((i + vi*e) >> 1); |
7042 | Indices.push_back(((i + vi*e) >> 1)+e); |
7043 | } |
7044 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
7045 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
7046 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
7047 | } |
7048 | return SV; |
7049 | } |
7050 | case NEON::BI__builtin_neon_vdot_v: |
7051 | case NEON::BI__builtin_neon_vdotq_v: { |
7052 | auto *InputTy = |
7053 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7054 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7055 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7056 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); |
7057 | } |
7058 | case NEON::BI__builtin_neon_vfmlal_low_v: |
7059 | case NEON::BI__builtin_neon_vfmlalq_low_v: { |
7060 | auto *InputTy = |
7061 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7062 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7063 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low"); |
7064 | } |
7065 | case NEON::BI__builtin_neon_vfmlsl_low_v: |
7066 | case NEON::BI__builtin_neon_vfmlslq_low_v: { |
7067 | auto *InputTy = |
7068 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7069 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7070 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low"); |
7071 | } |
7072 | case NEON::BI__builtin_neon_vfmlal_high_v: |
7073 | case NEON::BI__builtin_neon_vfmlalq_high_v: { |
7074 | auto *InputTy = |
7075 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7076 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7077 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high"); |
7078 | } |
7079 | case NEON::BI__builtin_neon_vfmlsl_high_v: |
7080 | case NEON::BI__builtin_neon_vfmlslq_high_v: { |
7081 | auto *InputTy = |
7082 | llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); |
7083 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7084 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high"); |
7085 | } |
7086 | case NEON::BI__builtin_neon_vmmlaq_v: { |
7087 | auto *InputTy = |
7088 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7089 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7090 | Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; |
7091 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla"); |
7092 | } |
7093 | case NEON::BI__builtin_neon_vusmmlaq_v: { |
7094 | auto *InputTy = |
7095 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7096 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7097 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla"); |
7098 | } |
7099 | case NEON::BI__builtin_neon_vusdot_v: |
7100 | case NEON::BI__builtin_neon_vusdotq_v: { |
7101 | auto *InputTy = |
7102 | llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); |
7103 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7104 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot"); |
7105 | } |
7106 | case NEON::BI__builtin_neon_vbfdot_v: |
7107 | case NEON::BI__builtin_neon_vbfdotq_v: { |
7108 | llvm::Type *InputTy = |
7109 | llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16); |
7110 | llvm::Type *Tys[2] = { Ty, InputTy }; |
7111 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot"); |
7112 | } |
7113 | case NEON::BI__builtin_neon___a32_vcvt_bf16_v: { |
7114 | llvm::Type *Tys[1] = { Ty }; |
7115 | Function *F = CGM.getIntrinsic(Int, Tys); |
7116 | return EmitNeonCall(F, Ops, "vcvtfp2bf"); |
7117 | } |
7118 | |
7119 | } |
7120 | |
7121 | assert(Int && "Expected valid intrinsic number"); |
7122 | |
7123 | |
7124 | Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E); |
7125 | |
7126 | Value *Result = EmitNeonCall(F, Ops, NameHint); |
7127 | llvm::Type *ResultType = ConvertType(E->getType()); |
7128 | |
7129 | |
7130 | return Builder.CreateBitCast(Result, ResultType, NameHint); |
7131 | } |
7132 | |
7133 | Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr( |
7134 | Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp, |
7135 | const CmpInst::Predicate Ip, const Twine &Name) { |
7136 | llvm::Type *OTy = Op->getType(); |
7137 | |
7138 | |
7139 | |
7140 | |
7141 | |
7142 | if (BitCastInst *BI = dyn_cast<BitCastInst>(Op)) |
7143 | OTy = BI->getOperand(0)->getType(); |
7144 | |
7145 | Op = Builder.CreateBitCast(Op, OTy); |
7146 | if (OTy->getScalarType()->isFloatingPointTy()) { |
7147 | Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy)); |
7148 | } else { |
7149 | Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy)); |
7150 | } |
7151 | return Builder.CreateSExt(Op, Ty, Name); |
7152 | } |
7153 | |
7154 | static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
7155 | Value *ExtOp, Value *IndexOp, |
7156 | llvm::Type *ResTy, unsigned IntID, |
7157 | const char *Name) { |
7158 | SmallVector<Value *, 2> TblOps; |
7159 | if (ExtOp) |
7160 | TblOps.push_back(ExtOp); |
7161 | |
7162 | |
7163 | SmallVector<int, 16> Indices; |
7164 | auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
7165 | for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { |
7166 | Indices.push_back(2*i); |
7167 | Indices.push_back(2*i+1); |
7168 | } |
7169 | |
7170 | int PairPos = 0, End = Ops.size() - 1; |
7171 | while (PairPos < End) { |
7172 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7173 | Ops[PairPos+1], Indices, |
7174 | Name)); |
7175 | PairPos += 2; |
7176 | } |
7177 | |
7178 | |
7179 | |
7180 | if (PairPos == End) { |
7181 | Value *ZeroTbl = ConstantAggregateZero::get(TblTy); |
7182 | TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], |
7183 | ZeroTbl, Indices, Name)); |
7184 | } |
7185 | |
7186 | Function *TblF; |
7187 | TblOps.push_back(IndexOp); |
7188 | TblF = CGF.CGM.getIntrinsic(IntID, ResTy); |
7189 | |
7190 | return CGF.EmitNeonCall(TblF, TblOps, Name); |
7191 | } |
7192 | |
7193 | Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { |
7194 | unsigned Value; |
7195 | switch (BuiltinID) { |
7196 | default: |
7197 | return nullptr; |
7198 | case ARM::BI__builtin_arm_nop: |
7199 | Value = 0; |
7200 | break; |
7201 | case ARM::BI__builtin_arm_yield: |
7202 | case ARM::BI__yield: |
7203 | Value = 1; |
7204 | break; |
7205 | case ARM::BI__builtin_arm_wfe: |
7206 | case ARM::BI__wfe: |
7207 | Value = 2; |
7208 | break; |
7209 | case ARM::BI__builtin_arm_wfi: |
7210 | case ARM::BI__wfi: |
7211 | Value = 3; |
7212 | break; |
7213 | case ARM::BI__builtin_arm_sev: |
7214 | case ARM::BI__sev: |
7215 | Value = 4; |
7216 | break; |
7217 | case ARM::BI__builtin_arm_sevl: |
7218 | case ARM::BI__sevl: |
7219 | Value = 5; |
7220 | break; |
7221 | } |
7222 | |
7223 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint), |
7224 | llvm::ConstantInt::get(Int32Ty, Value)); |
7225 | } |
7226 | |
7227 | enum SpecialRegisterAccessKind { |
7228 | NormalRead, |
7229 | VolatileRead, |
7230 | Write, |
7231 | }; |
7232 | |
7233 | |
7234 | |
7235 | |
7236 | static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF, |
7237 | const CallExpr *E, |
7238 | llvm::Type *RegisterType, |
7239 | llvm::Type *ValueType, |
7240 | SpecialRegisterAccessKind AccessKind, |
7241 | StringRef SysReg = "") { |
7242 | |
7243 | assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) |
7244 | && "Unsupported size for register."); |
7245 | |
7246 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
7247 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
7248 | LLVMContext &Context = CGM.getLLVMContext(); |
7249 | |
7250 | if (SysReg.empty()) { |
7251 | const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); |
7252 | SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); |
7253 | } |
7254 | |
7255 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; |
7256 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
7257 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
7258 | |
7259 | llvm::Type *Types[] = { RegisterType }; |
7260 | |
7261 | bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); |
7262 | assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) |
7263 | && "Can't fit 64-bit value in 32-bit register"); |
7264 | |
7265 | if (AccessKind != Write) { |
7266 | assert(AccessKind == NormalRead || AccessKind == VolatileRead); |
7267 | llvm::Function *F = CGM.getIntrinsic( |
7268 | AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register |
7269 | : llvm::Intrinsic::read_register, |
7270 | Types); |
7271 | llvm::Value *Call = Builder.CreateCall(F, Metadata); |
7272 | |
7273 | if (MixedTypes) |
7274 | |
7275 | return Builder.CreateTrunc(Call, ValueType); |
7276 | |
7277 | if (ValueType->isPointerTy()) |
7278 | |
7279 | return Builder.CreateIntToPtr(Call, ValueType); |
7280 | |
7281 | return Call; |
7282 | } |
7283 | |
7284 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
7285 | llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); |
7286 | if (MixedTypes) { |
7287 | |
7288 | ArgValue = Builder.CreateZExt(ArgValue, RegisterType); |
7289 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7290 | } |
7291 | |
7292 | if (ValueType->isPointerTy()) { |
7293 | |
7294 | ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); |
7295 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7296 | } |
7297 | |
7298 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
7299 | } |
7300 | |
7301 | |
7302 | |
7303 | static bool HasExtraNeonArgument(unsigned BuiltinID) { |
7304 | switch (BuiltinID) { |
7305 | default: break; |
7306 | case NEON::BI__builtin_neon_vget_lane_i8: |
7307 | case NEON::BI__builtin_neon_vget_lane_i16: |
7308 | case NEON::BI__builtin_neon_vget_lane_bf16: |
7309 | case NEON::BI__builtin_neon_vget_lane_i32: |
7310 | case NEON::BI__builtin_neon_vget_lane_i64: |
7311 | case NEON::BI__builtin_neon_vget_lane_f32: |
7312 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7313 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7314 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
7315 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7316 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7317 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7318 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
7319 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
7320 | case NEON::BI__builtin_neon_vset_lane_i8: |
7321 | case NEON::BI__builtin_neon_vset_lane_i16: |
7322 | case NEON::BI__builtin_neon_vset_lane_bf16: |
7323 | case NEON::BI__builtin_neon_vset_lane_i32: |
7324 | case NEON::BI__builtin_neon_vset_lane_i64: |
7325 | case NEON::BI__builtin_neon_vset_lane_f32: |
7326 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7327 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7328 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
7329 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7330 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7331 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7332 | case NEON::BI__builtin_neon_vsha1h_u32: |
7333 | case NEON::BI__builtin_neon_vsha1cq_u32: |
7334 | case NEON::BI__builtin_neon_vsha1pq_u32: |
7335 | case NEON::BI__builtin_neon_vsha1mq_u32: |
7336 | case NEON::BI__builtin_neon_vcvth_bf16_f32: |
7337 | case clang::ARM::BI_MoveToCoprocessor: |
7338 | case clang::ARM::BI_MoveToCoprocessor2: |
7339 | return false; |
7340 | } |
7341 | return true; |
7342 | } |
7343 | |
7344 | Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, |
7345 | const CallExpr *E, |
7346 | ReturnValueSlot ReturnValue, |
7347 | llvm::Triple::ArchType Arch) { |
7348 | if (auto Hint = GetValueForARMHint(BuiltinID)) |
7349 | return Hint; |
7350 | |
7351 | if (BuiltinID == ARM::BI__emit) { |
7352 | bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb; |
7353 | llvm::FunctionType *FTy = |
7354 | llvm::FunctionType::get(VoidTy, false); |
7355 | |
7356 | Expr::EvalResult Result; |
7357 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
7358 | llvm_unreachable("Sema will ensure that the parameter is constant"); |
7359 | |
7360 | llvm::APSInt Value = Result.Val.getInt(); |
7361 | uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue(); |
7362 | |
7363 | llvm::InlineAsm *Emit = |
7364 | IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "", |
7365 | true) |
7366 | : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", |
7367 | true); |
7368 | |
7369 | return Builder.CreateCall(Emit); |
7370 | } |
7371 | |
7372 | if (BuiltinID == ARM::BI__builtin_arm_dbg) { |
7373 | Value *Option = EmitScalarExpr(E->getArg(0)); |
7374 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option); |
7375 | } |
7376 | |
7377 | if (BuiltinID == ARM::BI__builtin_arm_prefetch) { |
7378 | Value *Address = EmitScalarExpr(E->getArg(0)); |
7379 | Value *RW = EmitScalarExpr(E->getArg(1)); |
7380 | Value *IsData = EmitScalarExpr(E->getArg(2)); |
7381 | |
7382 | |
7383 | Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); |
7384 | |
7385 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
7386 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
7387 | } |
7388 | |
7389 | if (BuiltinID == ARM::BI__builtin_arm_rbit) { |
7390 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7391 | return Builder.CreateCall( |
7392 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
7393 | } |
7394 | |
7395 | if (BuiltinID == ARM::BI__builtin_arm_cls) { |
7396 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7397 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls"); |
7398 | } |
7399 | if (BuiltinID == ARM::BI__builtin_arm_cls64) { |
7400 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
7401 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg, |
7402 | "cls"); |
7403 | } |
7404 | |
7405 | if (BuiltinID == ARM::BI__clear_cache) { |
7406 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
7407 | const FunctionDecl *FD = E->getDirectCallee(); |
7408 | Value *Ops[2]; |
7409 | for (unsigned i = 0; i < 2; i++) |
7410 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
7411 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
7412 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
7413 | StringRef Name = FD->getName(); |
7414 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
7415 | } |
7416 | |
7417 | if (BuiltinID == ARM::BI__builtin_arm_mcrr || |
7418 | BuiltinID == ARM::BI__builtin_arm_mcrr2) { |
7419 | Function *F; |
7420 | |
7421 | switch (BuiltinID) { |
7422 | default: llvm_unreachable("unexpected builtin"); |
7423 | case ARM::BI__builtin_arm_mcrr: |
7424 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr); |
7425 | break; |
7426 | case ARM::BI__builtin_arm_mcrr2: |
7427 | F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); |
7428 | break; |
7429 | } |
7430 | |
7431 | |
7432 | |
7433 | |
7434 | |
7435 | |
7436 | |
7437 | |
7438 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7439 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7440 | Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); |
7441 | Value *CRm = EmitScalarExpr(E->getArg(3)); |
7442 | |
7443 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7444 | Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty); |
7445 | Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1); |
7446 | Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty); |
7447 | |
7448 | return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm}); |
7449 | } |
7450 | |
7451 | if (BuiltinID == ARM::BI__builtin_arm_mrrc || |
7452 | BuiltinID == ARM::BI__builtin_arm_mrrc2) { |
7453 | Function *F; |
7454 | |
7455 | switch (BuiltinID) { |
7456 | default: llvm_unreachable("unexpected builtin"); |
7457 | case ARM::BI__builtin_arm_mrrc: |
7458 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc); |
7459 | break; |
7460 | case ARM::BI__builtin_arm_mrrc2: |
7461 | F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); |
7462 | break; |
7463 | } |
7464 | |
7465 | Value *Coproc = EmitScalarExpr(E->getArg(0)); |
7466 | Value *Opc1 = EmitScalarExpr(E->getArg(1)); |
7467 | Value *CRm = EmitScalarExpr(E->getArg(2)); |
7468 | Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); |
7469 | |
7470 | |
7471 | |
7472 | |
7473 | Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1); |
7474 | Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0); |
7475 | Rt = Builder.CreateZExt(Rt, Int64Ty); |
7476 | Rt1 = Builder.CreateZExt(Rt1, Int64Ty); |
7477 | |
7478 | Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32); |
7479 | RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true); |
7480 | RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1); |
7481 | |
7482 | return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); |
7483 | } |
7484 | |
7485 | if (BuiltinID == ARM::BI__builtin_arm_ldrexd || |
7486 | ((BuiltinID == ARM::BI__builtin_arm_ldrex || |
7487 | BuiltinID == ARM::BI__builtin_arm_ldaex) && |
7488 | getContext().getTypeSize(E->getType()) == 64) || |
7489 | BuiltinID == ARM::BI__ldrexd) { |
7490 | Function *F; |
7491 | |
7492 | switch (BuiltinID) { |
7493 | default: llvm_unreachable("unexpected builtin"); |
7494 | case ARM::BI__builtin_arm_ldaex: |
7495 | F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); |
7496 | break; |
7497 | case ARM::BI__builtin_arm_ldrexd: |
7498 | case ARM::BI__builtin_arm_ldrex: |
7499 | case ARM::BI__ldrexd: |
7500 | F = CGM.getIntrinsic(Intrinsic::arm_ldrexd); |
7501 | break; |
7502 | } |
7503 | |
7504 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
7505 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
7506 | "ldrexd"); |
7507 | |
7508 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
7509 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
7510 | Val0 = Builder.CreateZExt(Val0, Int64Ty); |
7511 | Val1 = Builder.CreateZExt(Val1, Int64Ty); |
7512 | |
7513 | Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32); |
7514 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true ); |
7515 | Val = Builder.CreateOr(Val, Val1); |
7516 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
7517 | } |
7518 | |
7519 | if (BuiltinID == ARM::BI__builtin_arm_ldrex || |
7520 | BuiltinID == ARM::BI__builtin_arm_ldaex) { |
7521 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
7522 | |
7523 | QualType Ty = E->getType(); |
7524 | llvm::Type *RealResTy = ConvertType(Ty); |
7525 | llvm::Type *PtrTy = llvm::IntegerType::get( |
7526 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
7527 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
7528 | |
7529 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex |
7530 | ? Intrinsic::arm_ldaex |
7531 | : Intrinsic::arm_ldrex, |
7532 | PtrTy); |
7533 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex"); |
7534 | |
7535 | if (RealResTy->isPointerTy()) |
7536 | return Builder.CreateIntToPtr(Val, RealResTy); |
7537 | else { |
7538 | llvm::Type *IntResTy = llvm::IntegerType::get( |
7539 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
7540 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
7541 | return Builder.CreateBitCast(Val, RealResTy); |
7542 | } |
7543 | } |
7544 | |
7545 | if (BuiltinID == ARM::BI__builtin_arm_strexd || |
7546 | ((BuiltinID == ARM::BI__builtin_arm_stlex || |
7547 | BuiltinID == ARM::BI__builtin_arm_strex) && |
7548 | getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { |
7549 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7550 | ? Intrinsic::arm_stlexd |
7551 | : Intrinsic::arm_strexd); |
7552 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty); |
7553 | |
7554 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
7555 | Value *Val = EmitScalarExpr(E->getArg(0)); |
7556 | Builder.CreateStore(Val, Tmp); |
7557 | |
7558 | Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy)); |
7559 | Val = Builder.CreateLoad(LdPtr); |
7560 | |
7561 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
7562 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
7563 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); |
7564 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); |
7565 | } |
7566 | |
7567 | if (BuiltinID == ARM::BI__builtin_arm_strex || |
7568 | BuiltinID == ARM::BI__builtin_arm_stlex) { |
7569 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
7570 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
7571 | |
7572 | QualType Ty = E->getArg(0)->getType(); |
7573 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
7574 | getContext().getTypeSize(Ty)); |
7575 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
7576 | |
7577 | if (StoreVal->getType()->isPointerTy()) |
7578 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty); |
7579 | else { |
7580 | llvm::Type *IntTy = llvm::IntegerType::get( |
7581 | getLLVMContext(), |
7582 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
7583 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
7584 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty); |
7585 | } |
7586 | |
7587 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex |
7588 | ? Intrinsic::arm_stlex |
7589 | : Intrinsic::arm_strex, |
7590 | StoreAddr->getType()); |
7591 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); |
7592 | } |
7593 | |
7594 | if (BuiltinID == ARM::BI__builtin_arm_clrex) { |
7595 | Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); |
7596 | return Builder.CreateCall(F); |
7597 | } |
7598 | |
7599 | |
7600 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
7601 | switch (BuiltinID) { |
7602 | case ARM::BI__builtin_arm_crc32b: |
7603 | CRCIntrinsicID = Intrinsic::arm_crc32b; break; |
7604 | case ARM::BI__builtin_arm_crc32cb: |
7605 | CRCIntrinsicID = Intrinsic::arm_crc32cb; break; |
7606 | case ARM::BI__builtin_arm_crc32h: |
7607 | CRCIntrinsicID = Intrinsic::arm_crc32h; break; |
7608 | case ARM::BI__builtin_arm_crc32ch: |
7609 | CRCIntrinsicID = Intrinsic::arm_crc32ch; break; |
7610 | case ARM::BI__builtin_arm_crc32w: |
7611 | case ARM::BI__builtin_arm_crc32d: |
7612 | CRCIntrinsicID = Intrinsic::arm_crc32w; break; |
7613 | case ARM::BI__builtin_arm_crc32cw: |
7614 | case ARM::BI__builtin_arm_crc32cd: |
7615 | CRCIntrinsicID = Intrinsic::arm_crc32cw; break; |
7616 | } |
7617 | |
7618 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
7619 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
7620 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
7621 | |
7622 | |
7623 | |
7624 | if (BuiltinID == ARM::BI__builtin_arm_crc32d || |
7625 | BuiltinID == ARM::BI__builtin_arm_crc32cd) { |
7626 | Value *C1 = llvm::ConstantInt::get(Int64Ty, 32); |
7627 | Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty); |
7628 | Value *Arg1b = Builder.CreateLShr(Arg1, C1); |
7629 | Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); |
7630 | |
7631 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7632 | Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); |
7633 | return Builder.CreateCall(F, {Res, Arg1b}); |
7634 | } else { |
7635 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); |
7636 | |
7637 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
7638 | return Builder.CreateCall(F, {Arg0, Arg1}); |
7639 | } |
7640 | } |
7641 | |
7642 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7643 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7644 | BuiltinID == ARM::BI__builtin_arm_rsrp || |
7645 | BuiltinID == ARM::BI__builtin_arm_wsr || |
7646 | BuiltinID == ARM::BI__builtin_arm_wsr64 || |
7647 | BuiltinID == ARM::BI__builtin_arm_wsrp) { |
7648 | |
7649 | SpecialRegisterAccessKind AccessKind = Write; |
7650 | if (BuiltinID == ARM::BI__builtin_arm_rsr || |
7651 | BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7652 | BuiltinID == ARM::BI__builtin_arm_rsrp) |
7653 | AccessKind = VolatileRead; |
7654 | |
7655 | bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp || |
7656 | BuiltinID == ARM::BI__builtin_arm_wsrp; |
7657 | |
7658 | bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 || |
7659 | BuiltinID == ARM::BI__builtin_arm_wsr64; |
7660 | |
7661 | llvm::Type *ValueType; |
7662 | llvm::Type *RegisterType; |
7663 | if (IsPointerBuiltin) { |
7664 | ValueType = VoidPtrTy; |
7665 | RegisterType = Int32Ty; |
7666 | } else if (Is64Bit) { |
7667 | ValueType = RegisterType = Int64Ty; |
7668 | } else { |
7669 | ValueType = RegisterType = Int32Ty; |
7670 | } |
7671 | |
7672 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
7673 | AccessKind); |
7674 | } |
7675 | |
7676 | |
7677 | |
7678 | if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID)) |
7679 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
7680 | |
7681 | |
7682 | if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7683 | return Result; |
7684 | |
7685 | if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch)) |
7686 | return Result; |
7687 | |
7688 | |
7689 | |
7690 | unsigned ICEArguments = 0; |
7691 | ASTContext::GetBuiltinTypeError Error; |
7692 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
7693 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
7694 | |
7695 | auto getAlignmentValue32 = [&](Address addr) -> Value* { |
7696 | return Builder.getInt32(addr.getAlignment().getQuantity()); |
7697 | }; |
7698 | |
7699 | Address PtrOp0 = Address::invalid(); |
7700 | Address PtrOp1 = Address::invalid(); |
7701 | SmallVector<Value*, 4> Ops; |
7702 | bool HasExtraArg = HasExtraNeonArgument(BuiltinID); |
7703 | unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); |
7704 | for (unsigned i = 0, e = NumArgs; i != e; i++) { |
7705 | if (i == 0) { |
7706 | switch (BuiltinID) { |
7707 | case NEON::BI__builtin_neon_vld1_v: |
7708 | case NEON::BI__builtin_neon_vld1q_v: |
7709 | case NEON::BI__builtin_neon_vld1q_lane_v: |
7710 | case NEON::BI__builtin_neon_vld1_lane_v: |
7711 | case NEON::BI__builtin_neon_vld1_dup_v: |
7712 | case NEON::BI__builtin_neon_vld1q_dup_v: |
7713 | case NEON::BI__builtin_neon_vst1_v: |
7714 | case NEON::BI__builtin_neon_vst1q_v: |
7715 | case NEON::BI__builtin_neon_vst1q_lane_v: |
7716 | case NEON::BI__builtin_neon_vst1_lane_v: |
7717 | case NEON::BI__builtin_neon_vst2_v: |
7718 | case NEON::BI__builtin_neon_vst2q_v: |
7719 | case NEON::BI__builtin_neon_vst2_lane_v: |
7720 | case NEON::BI__builtin_neon_vst2q_lane_v: |
7721 | case NEON::BI__builtin_neon_vst3_v: |
7722 | case NEON::BI__builtin_neon_vst3q_v: |
7723 | case NEON::BI__builtin_neon_vst3_lane_v: |
7724 | case NEON::BI__builtin_neon_vst3q_lane_v: |
7725 | case NEON::BI__builtin_neon_vst4_v: |
7726 | case NEON::BI__builtin_neon_vst4q_v: |
7727 | case NEON::BI__builtin_neon_vst4_lane_v: |
7728 | case NEON::BI__builtin_neon_vst4q_lane_v: |
7729 | |
7730 | |
7731 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
7732 | Ops.push_back(PtrOp0.getPointer()); |
7733 | continue; |
7734 | } |
7735 | } |
7736 | if (i == 1) { |
7737 | switch (BuiltinID) { |
7738 | case NEON::BI__builtin_neon_vld2_v: |
7739 | case NEON::BI__builtin_neon_vld2q_v: |
7740 | case NEON::BI__builtin_neon_vld3_v: |
7741 | case NEON::BI__builtin_neon_vld3q_v: |
7742 | case NEON::BI__builtin_neon_vld4_v: |
7743 | case NEON::BI__builtin_neon_vld4q_v: |
7744 | case NEON::BI__builtin_neon_vld2_lane_v: |
7745 | case NEON::BI__builtin_neon_vld2q_lane_v: |
7746 | case NEON::BI__builtin_neon_vld3_lane_v: |
7747 | case NEON::BI__builtin_neon_vld3q_lane_v: |
7748 | case NEON::BI__builtin_neon_vld4_lane_v: |
7749 | case NEON::BI__builtin_neon_vld4q_lane_v: |
7750 | case NEON::BI__builtin_neon_vld2_dup_v: |
7751 | case NEON::BI__builtin_neon_vld2q_dup_v: |
7752 | case NEON::BI__builtin_neon_vld3_dup_v: |
7753 | case NEON::BI__builtin_neon_vld3q_dup_v: |
7754 | case NEON::BI__builtin_neon_vld4_dup_v: |
7755 | case NEON::BI__builtin_neon_vld4q_dup_v: |
7756 | |
7757 | |
7758 | PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); |
7759 | Ops.push_back(PtrOp1.getPointer()); |
7760 | continue; |
7761 | } |
7762 | } |
7763 | |
7764 | if ((ICEArguments & (1 << i)) == 0) { |
7765 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
7766 | } else { |
7767 | |
7768 | |
7769 | Ops.push_back(llvm::ConstantInt::get( |
7770 | getLLVMContext(), |
7771 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
7772 | } |
7773 | } |
7774 | |
7775 | switch (BuiltinID) { |
7776 | default: break; |
7777 | |
7778 | case NEON::BI__builtin_neon_vget_lane_i8: |
7779 | case NEON::BI__builtin_neon_vget_lane_i16: |
7780 | case NEON::BI__builtin_neon_vget_lane_i32: |
7781 | case NEON::BI__builtin_neon_vget_lane_i64: |
7782 | case NEON::BI__builtin_neon_vget_lane_bf16: |
7783 | case NEON::BI__builtin_neon_vget_lane_f32: |
7784 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
7785 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
7786 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
7787 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
7788 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
7789 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
7790 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
7791 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
7792 | return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane"); |
7793 | |
7794 | case NEON::BI__builtin_neon_vrndns_f32: { |
7795 | Value *Arg = EmitScalarExpr(E->getArg(0)); |
7796 | llvm::Type *Tys[] = {Arg->getType()}; |
7797 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); |
7798 | return Builder.CreateCall(F, {Arg}, "vrndn"); } |
7799 | |
7800 | case NEON::BI__builtin_neon_vset_lane_i8: |
7801 | case NEON::BI__builtin_neon_vset_lane_i16: |
7802 | case NEON::BI__builtin_neon_vset_lane_i32: |
7803 | case NEON::BI__builtin_neon_vset_lane_i64: |
7804 | case NEON::BI__builtin_neon_vset_lane_bf16: |
7805 | case NEON::BI__builtin_neon_vset_lane_f32: |
7806 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
7807 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
7808 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
7809 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
7810 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
7811 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
7812 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
7813 | |
7814 | case NEON::BI__builtin_neon_vsha1h_u32: |
7815 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops, |
7816 | "vsha1h"); |
7817 | case NEON::BI__builtin_neon_vsha1cq_u32: |
7818 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops, |
7819 | "vsha1h"); |
7820 | case NEON::BI__builtin_neon_vsha1pq_u32: |
7821 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops, |
7822 | "vsha1h"); |
7823 | case NEON::BI__builtin_neon_vsha1mq_u32: |
7824 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops, |
7825 | "vsha1h"); |
7826 | |
7827 | case NEON::BI__builtin_neon_vcvth_bf16_f32: { |
7828 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops, |
7829 | "vcvtbfp2bf"); |
7830 | } |
7831 | |
7832 | |
7833 | |
7834 | case ARM::BI_MoveToCoprocessor: |
7835 | case ARM::BI_MoveToCoprocessor2: { |
7836 | Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ? |
7837 | Intrinsic::arm_mcr : Intrinsic::arm_mcr2); |
7838 | return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], |
7839 | Ops[3], Ops[4], Ops[5]}); |
7840 | } |
7841 | } |
7842 | |
7843 | |
7844 | assert(HasExtraArg); |
7845 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
7846 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()); |
7847 | if (!Result) |
7848 | return nullptr; |
7849 | |
7850 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f || |
7851 | BuiltinID == ARM::BI__builtin_arm_vcvtr_d) { |
7852 | |
7853 | llvm::Type *Ty; |
7854 | if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f) |
7855 | Ty = FloatTy; |
7856 | else |
7857 | Ty = DoubleTy; |
7858 | |
7859 | |
7860 | bool usgn = Result->getZExtValue() == 1; |
7861 | unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr; |
7862 | |
7863 | |
7864 | Function *F = CGM.getIntrinsic(Int, Ty); |
7865 | return Builder.CreateCall(F, Ops, "vcvtr"); |
7866 | } |
7867 | |
7868 | |
7869 | NeonTypeFlags Type = Result->getZExtValue(); |
7870 | bool usgn = Type.isUnsigned(); |
7871 | bool rightShift = false; |
7872 | |
7873 | llvm::FixedVectorType *VTy = |
7874 | GetNeonType(this, Type, getTarget().hasLegalHalfType(), false, |
7875 | getTarget().hasBFloat16Type()); |
7876 | llvm::Type *Ty = VTy; |
7877 | if (!Ty) |
7878 | return nullptr; |
7879 | |
7880 | |
7881 | |
7882 | auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap); |
7883 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
7884 | IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted); |
7885 | if (Builtin) |
7886 | return EmitCommonNeonBuiltinExpr( |
7887 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
7888 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); |
7889 | |
7890 | unsigned Int; |
7891 | switch (BuiltinID) { |
7892 | default: return nullptr; |
7893 | case NEON::BI__builtin_neon_vld1q_lane_v: |
7894 | |
7895 | |
7896 | if (VTy->getElementType()->isIntegerTy(64)) { |
7897 | |
7898 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7899 | int Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); |
7900 | Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); |
7901 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
7902 | |
7903 | Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1); |
7904 | llvm::Type *Tys[] = {Ty, Int8PtrTy}; |
7905 | Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys); |
7906 | Value *Align = getAlignmentValue32(PtrOp0); |
7907 | Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); |
7908 | |
7909 | int Indices[] = {1 - Lane, Lane}; |
7910 | return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane"); |
7911 | } |
7912 | LLVM_FALLTHROUGH; |
7913 | case NEON::BI__builtin_neon_vld1_lane_v: { |
7914 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7915 | PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType()); |
7916 | Value *Ld = Builder.CreateLoad(PtrOp0); |
7917 | return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane"); |
7918 | } |
7919 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
7920 | Int = |
7921 | usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; |
7922 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", |
7923 | 1, true); |
7924 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
7925 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), |
7926 | Ops, "vqrshrun_n", 1, true); |
7927 | case NEON::BI__builtin_neon_vqshrn_n_v: |
7928 | Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; |
7929 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", |
7930 | 1, true); |
7931 | case NEON::BI__builtin_neon_vqshrun_n_v: |
7932 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), |
7933 | Ops, "vqshrun_n", 1, true); |
7934 | case NEON::BI__builtin_neon_vrecpe_v: |
7935 | case NEON::BI__builtin_neon_vrecpeq_v: |
7936 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), |
7937 | Ops, "vrecpe"); |
7938 | case NEON::BI__builtin_neon_vrshrn_n_v: |
7939 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), |
7940 | Ops, "vrshrn_n", 1, true); |
7941 | case NEON::BI__builtin_neon_vrsra_n_v: |
7942 | case NEON::BI__builtin_neon_vrsraq_n_v: |
7943 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7944 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7945 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); |
7946 | Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; |
7947 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); |
7948 | return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); |
7949 | case NEON::BI__builtin_neon_vsri_n_v: |
7950 | case NEON::BI__builtin_neon_vsriq_n_v: |
7951 | rightShift = true; |
7952 | LLVM_FALLTHROUGH; |
7953 | case NEON::BI__builtin_neon_vsli_n_v: |
7954 | case NEON::BI__builtin_neon_vsliq_n_v: |
7955 | Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift); |
7956 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty), |
7957 | Ops, "vsli_n"); |
7958 | case NEON::BI__builtin_neon_vsra_n_v: |
7959 | case NEON::BI__builtin_neon_vsraq_n_v: |
7960 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
7961 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
7962 | return Builder.CreateAdd(Ops[0], Ops[1]); |
7963 | case NEON::BI__builtin_neon_vst1q_lane_v: |
7964 | |
7965 | |
7966 | if (VTy->getElementType()->isIntegerTy(64)) { |
7967 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7968 | Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2])); |
7969 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); |
7970 | Ops[2] = getAlignmentValue32(PtrOp0); |
7971 | llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; |
7972 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, |
7973 | Tys), Ops); |
7974 | } |
7975 | LLVM_FALLTHROUGH; |
7976 | case NEON::BI__builtin_neon_vst1_lane_v: { |
7977 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
7978 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
7979 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
7980 | auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty)); |
7981 | return St; |
7982 | } |
7983 | case NEON::BI__builtin_neon_vtbl1_v: |
7984 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), |
7985 | Ops, "vtbl1"); |
7986 | case NEON::BI__builtin_neon_vtbl2_v: |
7987 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), |
7988 | Ops, "vtbl2"); |
7989 | case NEON::BI__builtin_neon_vtbl3_v: |
7990 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), |
7991 | Ops, "vtbl3"); |
7992 | case NEON::BI__builtin_neon_vtbl4_v: |
7993 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), |
7994 | Ops, "vtbl4"); |
7995 | case NEON::BI__builtin_neon_vtbx1_v: |
7996 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), |
7997 | Ops, "vtbx1"); |
7998 | case NEON::BI__builtin_neon_vtbx2_v: |
7999 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), |
8000 | Ops, "vtbx2"); |
8001 | case NEON::BI__builtin_neon_vtbx3_v: |
8002 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), |
8003 | Ops, "vtbx3"); |
8004 | case NEON::BI__builtin_neon_vtbx4_v: |
8005 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), |
8006 | Ops, "vtbx4"); |
8007 | } |
8008 | } |
8009 | |
8010 | template<typename Integer> |
8011 | static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { |
8012 | return E->getIntegerConstantExpr(Context)->getExtValue(); |
8013 | } |
8014 | |
8015 | static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V, |
8016 | llvm::Type *T, bool Unsigned) { |
8017 | |
8018 | |
8019 | return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T); |
8020 | } |
8021 | |
8022 | static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V, |
8023 | uint32_t Shift, bool Unsigned) { |
8024 | |
8025 | |
8026 | |
8027 | |
8028 | |
8029 | unsigned LaneBits = cast<llvm::VectorType>(V->getType()) |
8030 | ->getElementType() |
8031 | ->getPrimitiveSizeInBits(); |
8032 | if (Shift == LaneBits) { |
8033 | |
8034 | |
8035 | |
8036 | if (Unsigned) |
8037 | return llvm::Constant::getNullValue(V->getType()); |
8038 | else |
8039 | --Shift; |
8040 | } |
8041 | return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift); |
8042 | } |
8043 | |
8044 | static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { |
8045 | |
8046 | |
8047 | |
8048 | unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits(); |
8049 | return Builder.CreateVectorSplat(Elements, V); |
8050 | } |
8051 | |
8052 | static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder, |
8053 | CodeGenFunction *CGF, |
8054 | llvm::Value *V, |
8055 | llvm::Type *DestType) { |
8056 | |
8057 | |
8058 | |
8059 | |
8060 | |
8061 | |
8062 | |
8063 | |
8064 | |
8065 | |
8066 | |
8067 | if (CGF->getTarget().isBigEndian() && |
8068 | V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) { |
8069 | return Builder.CreateCall( |
8070 | CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq, |
8071 | {DestType, V->getType()}), |
8072 | V); |
8073 | } else { |
8074 | return Builder.CreateBitCast(V, DestType); |
8075 | } |
8076 | } |
8077 | |
8078 | static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { |
8079 | |
8080 | |
8081 | SmallVector<int, 16> Indices; |
8082 | unsigned InputElements = |
8083 | cast<llvm::FixedVectorType>(V->getType())->getNumElements(); |
8084 | for (unsigned i = 0; i < InputElements; i += 2) |
8085 | Indices.push_back(i + Odd); |
8086 | return Builder.CreateShuffleVector(V, Indices); |
8087 | } |
8088 | |
8089 | static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0, |
8090 | llvm::Value *V1) { |
8091 | |
8092 | assert(V0->getType() == V1->getType() && "Can't zip different vector types"); |
8093 | SmallVector<int, 16> Indices; |
8094 | unsigned InputElements = |
8095 | cast<llvm::FixedVectorType>(V0->getType())->getNumElements(); |
8096 | for (unsigned i = 0; i < InputElements; i++) { |
8097 | Indices.push_back(i); |
8098 | Indices.push_back(i + InputElements); |
8099 | } |
8100 | return Builder.CreateShuffleVector(V0, V1, Indices); |
8101 | } |
8102 | |
8103 | template<unsigned HighBit, unsigned OtherBits> |
8104 | static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { |
8105 | |
8106 | |
8107 | llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType(); |
8108 | unsigned LaneBits = T->getPrimitiveSizeInBits(); |
8109 | uint32_t Value = HighBit << (LaneBits - 1); |
8110 | if (OtherBits) |
8111 | Value |= (1UL << (LaneBits - 1)) - 1; |
8112 | llvm::Value *Lane = llvm::ConstantInt::get(T, Value); |
8113 | return ARMMVEVectorSplat(Builder, Lane); |
8114 | } |
8115 | |
8116 | static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder, |
8117 | llvm::Value *V, |
8118 | unsigned ReverseWidth) { |
8119 | |
8120 | |
8121 | SmallVector<int, 16> Indices; |
8122 | unsigned LaneSize = V->getType()->getScalarSizeInBits(); |
8123 | unsigned Elements = 128 / LaneSize; |
8124 | unsigned Mask = ReverseWidth / LaneSize - 1; |
8125 | for (unsigned i = 0; i < Elements; i++) |
8126 | Indices.push_back(i ^ Mask); |
8127 | return Builder.CreateShuffleVector(V, Indices); |
8128 | } |
8129 | |
8130 | Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID, |
8131 | const CallExpr *E, |
8132 | ReturnValueSlot ReturnValue, |
8133 | llvm::Triple::ArchType Arch) { |
8134 | enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType; |
8135 | Intrinsic::ID IRIntr; |
8136 | unsigned NumVectors; |
8137 | |
8138 | |
8139 | switch (BuiltinID) { |
8140 | #include "clang/Basic/arm_mve_builtin_cg.inc" |
8141 | |
8142 | |
8143 | |
8144 | default: |
8145 | return nullptr; |
8146 | } |
8147 | |
8148 | |
8149 | |
8150 | |
8151 | switch (CustomCodeGenType) { |
8152 | |
8153 | case CustomCodeGen::VLD24: { |
8154 | llvm::SmallVector<Value *, 4> Ops; |
8155 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8156 | |
8157 | auto MvecCType = E->getType(); |
8158 | auto MvecLType = ConvertType(MvecCType); |
8159 | assert(MvecLType->isStructTy() && |
8160 | "Return type for vld[24]q should be a struct"); |
8161 | assert(MvecLType->getStructNumElements() == 1 && |
8162 | "Return-type struct for vld[24]q should have one element"); |
8163 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8164 | assert(MvecLTypeInner->isArrayTy() && |
8165 | "Return-type struct for vld[24]q should contain an array"); |
8166 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors && |
8167 | "Array member of return-type struct vld[24]q has wrong length"); |
8168 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8169 | |
8170 | Tys.push_back(VecLType); |
8171 | |
8172 | auto Addr = E->getArg(0); |
8173 | Ops.push_back(EmitScalarExpr(Addr)); |
8174 | Tys.push_back(ConvertType(Addr->getType())); |
8175 | |
8176 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8177 | Value *LoadResult = Builder.CreateCall(F, Ops); |
8178 | Value *MvecOut = UndefValue::get(MvecLType); |
8179 | for (unsigned i = 0; i < NumVectors; ++i) { |
8180 | Value *Vec = Builder.CreateExtractValue(LoadResult, i); |
8181 | MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i}); |
8182 | } |
8183 | |
8184 | if (ReturnValue.isNull()) |
8185 | return MvecOut; |
8186 | else |
8187 | return Builder.CreateStore(MvecOut, ReturnValue.getValue()); |
8188 | } |
8189 | |
8190 | case CustomCodeGen::VST24: { |
8191 | llvm::SmallVector<Value *, 4> Ops; |
8192 | llvm::SmallVector<llvm::Type *, 4> Tys; |
8193 | |
8194 | auto Addr = E->getArg(0); |
8195 | Ops.push_back(EmitScalarExpr(Addr)); |
8196 | Tys.push_back(ConvertType(Addr->getType())); |
8197 | |
8198 | auto MvecCType = E->getArg(1)->getType(); |
8199 | auto MvecLType = ConvertType(MvecCType); |
8200 | assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct"); |
8201 | assert(MvecLType->getStructNumElements() == 1 && |
8202 | "Data-type struct for vst2q should have one element"); |
8203 | auto MvecLTypeInner = MvecLType->getStructElementType(0); |
8204 | assert(MvecLTypeInner->isArrayTy() && |
8205 | "Data-type struct for vst2q should contain an array"); |
8206 | assert(MvecLTypeInner->getArrayNumElements() == NumVectors && |
8207 | "Array member of return-type struct vld[24]q has wrong length"); |
8208 | auto VecLType = MvecLTypeInner->getArrayElementType(); |
8209 | |
8210 | Tys.push_back(VecLType); |
8211 | |
8212 | AggValueSlot MvecSlot = CreateAggTemp(MvecCType); |
8213 | EmitAggExpr(E->getArg(1), MvecSlot); |
8214 | auto Mvec = Builder.CreateLoad(MvecSlot.getAddress()); |
8215 | for (unsigned i = 0; i < NumVectors; i++) |
8216 | Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i})); |
8217 | |
8218 | Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys)); |
8219 | Value *ToReturn = nullptr; |
8220 | for (unsigned i = 0; i < NumVectors; i++) { |
8221 | Ops.push_back(llvm::ConstantInt::get(Int32Ty, i)); |
8222 | ToReturn = Builder.CreateCall(F, Ops); |
8223 | Ops.pop_back(); |
8224 | } |
8225 | return ToReturn; |
8226 | } |
8227 | } |
8228 | llvm_unreachable("unknown custom codegen type."); |
8229 | } |
8230 | |
8231 | Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID, |
8232 | const CallExpr *E, |
8233 | ReturnValueSlot ReturnValue, |
8234 | llvm::Triple::ArchType Arch) { |
8235 | switch (BuiltinID) { |
8236 | default: |
8237 | return nullptr; |
8238 | #include "clang/Basic/arm_cde_builtin_cg.inc" |
8239 | } |
8240 | } |
8241 | |
8242 | static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, |
8243 | const CallExpr *E, |
8244 | SmallVectorImpl<Value *> &Ops, |
8245 | llvm::Triple::ArchType Arch) { |
8246 | unsigned int Int = 0; |
8247 | const char *s = nullptr; |
8248 | |
8249 | switch (BuiltinID) { |
8250 | default: |
8251 | return nullptr; |
8252 | case NEON::BI__builtin_neon_vtbl1_v: |
8253 | case NEON::BI__builtin_neon_vqtbl1_v: |
8254 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8255 | case NEON::BI__builtin_neon_vtbl2_v: |
8256 | case NEON::BI__builtin_neon_vqtbl2_v: |
8257 | case NEON::BI__builtin_neon_vqtbl2q_v: |
8258 | case NEON::BI__builtin_neon_vtbl3_v: |
8259 | case NEON::BI__builtin_neon_vqtbl3_v: |
8260 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8261 | case NEON::BI__builtin_neon_vtbl4_v: |
8262 | case NEON::BI__builtin_neon_vqtbl4_v: |
8263 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8264 | break; |
8265 | case NEON::BI__builtin_neon_vtbx1_v: |
8266 | case NEON::BI__builtin_neon_vqtbx1_v: |
8267 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8268 | case NEON::BI__builtin_neon_vtbx2_v: |
8269 | case NEON::BI__builtin_neon_vqtbx2_v: |
8270 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8271 | case NEON::BI__builtin_neon_vtbx3_v: |
8272 | case NEON::BI__builtin_neon_vqtbx3_v: |
8273 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8274 | case NEON::BI__builtin_neon_vtbx4_v: |
8275 | case NEON::BI__builtin_neon_vqtbx4_v: |
8276 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8277 | break; |
8278 | } |
8279 | |
8280 | assert(E->getNumArgs() >= 3); |
8281 | |
8282 | |
8283 | const Expr *Arg = E->getArg(E->getNumArgs() - 1); |
8284 | Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext()); |
8285 | if (!Result) |
8286 | return nullptr; |
8287 | |
8288 | |
8289 | NeonTypeFlags Type = Result->getZExtValue(); |
8290 | llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type); |
8291 | if (!Ty) |
8292 | return nullptr; |
8293 | |
8294 | CodeGen::CGBuilderTy &Builder = CGF.Builder; |
8295 | |
8296 | |
8297 | |
8298 | switch (BuiltinID) { |
8299 | case NEON::BI__builtin_neon_vtbl1_v: { |
8300 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr, |
8301 | Ops[1], Ty, Intrinsic::aarch64_neon_tbl1, |
8302 | "vtbl1"); |
8303 | } |
8304 | case NEON::BI__builtin_neon_vtbl2_v: { |
8305 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr, |
8306 | Ops[2], Ty, Intrinsic::aarch64_neon_tbl1, |
8307 | "vtbl1"); |
8308 | } |
8309 | case NEON::BI__builtin_neon_vtbl3_v: { |
8310 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr, |
8311 | Ops[3], Ty, Intrinsic::aarch64_neon_tbl2, |
8312 | "vtbl2"); |
8313 | } |
8314 | case NEON::BI__builtin_neon_vtbl4_v: { |
8315 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr, |
8316 | Ops[4], Ty, Intrinsic::aarch64_neon_tbl2, |
8317 | "vtbl2"); |
8318 | } |
8319 | case NEON::BI__builtin_neon_vtbx1_v: { |
8320 | Value *TblRes = |
8321 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2], |
8322 | Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1"); |
8323 | |
8324 | llvm::Constant *EightV = ConstantInt::get(Ty, 8); |
8325 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV); |
8326 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8327 | |
8328 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8329 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8330 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8331 | } |
8332 | case NEON::BI__builtin_neon_vtbx2_v: { |
8333 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0], |
8334 | Ops[3], Ty, Intrinsic::aarch64_neon_tbx1, |
8335 | "vtbx1"); |
8336 | } |
8337 | case NEON::BI__builtin_neon_vtbx3_v: { |
8338 | Value *TblRes = |
8339 | packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4], |
8340 | Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); |
8341 | |
8342 | llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); |
8343 | Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], |
8344 | TwentyFourV); |
8345 | CmpRes = Builder.CreateSExt(CmpRes, Ty); |
8346 | |
8347 | Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); |
8348 | Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes); |
8349 | return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx"); |
8350 | } |
8351 | case NEON::BI__builtin_neon_vtbx4_v: { |
8352 | return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0], |
8353 | Ops[5], Ty, Intrinsic::aarch64_neon_tbx2, |
8354 | "vtbx2"); |
8355 | } |
8356 | case NEON::BI__builtin_neon_vqtbl1_v: |
8357 | case NEON::BI__builtin_neon_vqtbl1q_v: |
8358 | Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; |
8359 | case NEON::BI__builtin_neon_vqtbl2_v: |
8360 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
8361 | Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; |
8362 | case NEON::BI__builtin_neon_vqtbl3_v: |
8363 | case NEON::BI__builtin_neon_vqtbl3q_v: |
8364 | Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; |
8365 | case NEON::BI__builtin_neon_vqtbl4_v: |
8366 | case NEON::BI__builtin_neon_vqtbl4q_v: |
8367 | Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; |
8368 | case NEON::BI__builtin_neon_vqtbx1_v: |
8369 | case NEON::BI__builtin_neon_vqtbx1q_v: |
8370 | Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; |
8371 | case NEON::BI__builtin_neon_vqtbx2_v: |
8372 | case NEON::BI__builtin_neon_vqtbx2q_v: |
8373 | Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; |
8374 | case NEON::BI__builtin_neon_vqtbx3_v: |
8375 | case NEON::BI__builtin_neon_vqtbx3q_v: |
8376 | Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; |
8377 | case NEON::BI__builtin_neon_vqtbx4_v: |
8378 | case NEON::BI__builtin_neon_vqtbx4q_v: |
8379 | Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; |
8380 | } |
8381 | } |
8382 | |
8383 | if (!Int) |
8384 | return nullptr; |
8385 | |
8386 | Function *F = CGF.CGM.getIntrinsic(Int, Ty); |
8387 | return CGF.EmitNeonCall(F, Ops, s); |
8388 | } |
8389 | |
8390 | Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { |
8391 | auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
8392 | Op = Builder.CreateBitCast(Op, Int16Ty); |
8393 | Value *V = UndefValue::get(VTy); |
8394 | llvm::Constant *CI = ConstantInt::get(SizeTy, 0); |
8395 | Op = Builder.CreateInsertElement(V, Op, CI); |
8396 | return Op; |
8397 | } |
8398 | |
8399 | |
8400 | |
8401 | |
8402 | llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) { |
8403 | switch (TypeFlags.getMemEltType()) { |
8404 | case SVETypeFlags::MemEltTyDefault: |
8405 | return getEltType(TypeFlags); |
8406 | case SVETypeFlags::MemEltTyInt8: |
8407 | return Builder.getInt8Ty(); |
8408 | case SVETypeFlags::MemEltTyInt16: |
8409 | return Builder.getInt16Ty(); |
8410 | case SVETypeFlags::MemEltTyInt32: |
8411 | return Builder.getInt32Ty(); |
8412 | case SVETypeFlags::MemEltTyInt64: |
8413 | return Builder.getInt64Ty(); |
8414 | } |
8415 | llvm_unreachable("Unknown MemEltType"); |
8416 | } |
8417 | |
8418 | llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) { |
8419 | switch (TypeFlags.getEltType()) { |
8420 | default: |
8421 | llvm_unreachable("Invalid SVETypeFlag!"); |
8422 | |
8423 | case SVETypeFlags::EltTyInt8: |
8424 | return Builder.getInt8Ty(); |
8425 | case SVETypeFlags::EltTyInt16: |
8426 | return Builder.getInt16Ty(); |
8427 | case SVETypeFlags::EltTyInt32: |
8428 | return Builder.getInt32Ty(); |
8429 | case SVETypeFlags::EltTyInt64: |
8430 | return Builder.getInt64Ty(); |
8431 | |
8432 | case SVETypeFlags::EltTyFloat16: |
8433 | return Builder.getHalfTy(); |
8434 | case SVETypeFlags::EltTyFloat32: |
8435 | return Builder.getFloatTy(); |
8436 | case SVETypeFlags::EltTyFloat64: |
8437 | return Builder.getDoubleTy(); |
8438 | |
8439 | case SVETypeFlags::EltTyBFloat16: |
8440 | return Builder.getBFloatTy(); |
8441 | |
8442 | case SVETypeFlags::EltTyBool8: |
8443 | case SVETypeFlags::EltTyBool16: |
8444 | case SVETypeFlags::EltTyBool32: |
8445 | case SVETypeFlags::EltTyBool64: |
8446 | return Builder.getInt1Ty(); |
8447 | } |
8448 | } |
8449 | |
8450 | |
8451 | |
8452 | llvm::ScalableVectorType * |
8453 | CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) { |
8454 | switch (TypeFlags.getEltType()) { |
8455 | default: llvm_unreachable("Unhandled SVETypeFlag!"); |
8456 | |
8457 | case SVETypeFlags::EltTyInt8: |
8458 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8459 | case SVETypeFlags::EltTyInt16: |
8460 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8461 | case SVETypeFlags::EltTyInt32: |
8462 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8463 | case SVETypeFlags::EltTyInt64: |
8464 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8465 | |
8466 | case SVETypeFlags::EltTyBFloat16: |
8467 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8468 | case SVETypeFlags::EltTyFloat16: |
8469 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8470 | case SVETypeFlags::EltTyFloat32: |
8471 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8472 | case SVETypeFlags::EltTyFloat64: |
8473 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8474 | |
8475 | case SVETypeFlags::EltTyBool8: |
8476 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8477 | case SVETypeFlags::EltTyBool16: |
8478 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8479 | case SVETypeFlags::EltTyBool32: |
8480 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8481 | case SVETypeFlags::EltTyBool64: |
8482 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8483 | } |
8484 | } |
8485 | |
8486 | |
8487 | llvm::ScalableVectorType * |
8488 | CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { |
8489 | switch (TypeFlags.getEltType()) { |
8490 | default: |
8491 | llvm_unreachable("Invalid SVETypeFlag!"); |
8492 | |
8493 | case SVETypeFlags::EltTyInt8: |
8494 | return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16); |
8495 | case SVETypeFlags::EltTyInt16: |
8496 | return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8); |
8497 | case SVETypeFlags::EltTyInt32: |
8498 | return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4); |
8499 | case SVETypeFlags::EltTyInt64: |
8500 | return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2); |
8501 | |
8502 | case SVETypeFlags::EltTyFloat16: |
8503 | return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8); |
8504 | case SVETypeFlags::EltTyBFloat16: |
8505 | return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8); |
8506 | case SVETypeFlags::EltTyFloat32: |
8507 | return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4); |
8508 | case SVETypeFlags::EltTyFloat64: |
8509 | return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2); |
8510 | |
8511 | case SVETypeFlags::EltTyBool8: |
8512 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); |
8513 | case SVETypeFlags::EltTyBool16: |
8514 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8); |
8515 | case SVETypeFlags::EltTyBool32: |
8516 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4); |
8517 | case SVETypeFlags::EltTyBool64: |
8518 | return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2); |
8519 | } |
8520 | } |
8521 | |
8522 | llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) { |
8523 | Function *Ptrue = |
8524 | CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags)); |
8525 | return Builder.CreateCall(Ptrue, {Builder.getInt32( 31)}); |
8526 | } |
8527 | |
8528 | constexpr unsigned SVEBitsPerBlock = 128; |
8529 | |
8530 | static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) { |
8531 | unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits(); |
8532 | return llvm::ScalableVectorType::get(EltTy, NumElts); |
8533 | } |
8534 | |
8535 | |
8536 | |
8537 | Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred, |
8538 | llvm::ScalableVectorType *VTy) { |
8539 | auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy); |
8540 | if (Pred->getType() == RTy) |
8541 | return Pred; |
8542 | |
8543 | unsigned IntID; |
8544 | llvm::Type *IntrinsicTy; |
8545 | switch (VTy->getMinNumElements()) { |
8546 | default: |
8547 | llvm_unreachable("unsupported element count!"); |
8548 | case 2: |
8549 | case 4: |
8550 | case 8: |
8551 | IntID = Intrinsic::aarch64_sve_convert_from_svbool; |
8552 | IntrinsicTy = RTy; |
8553 | break; |
8554 | case 16: |
8555 | IntID = Intrinsic::aarch64_sve_convert_to_svbool; |
8556 | IntrinsicTy = Pred->getType(); |
8557 | break; |
8558 | } |
8559 | |
8560 | Function *F = CGM.getIntrinsic(IntID, IntrinsicTy); |
8561 | Value *C = Builder.CreateCall(F, Pred); |
8562 | assert(C->getType() == RTy && "Unexpected return type!"); |
8563 | return C; |
8564 | } |
8565 | |
8566 | Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags, |
8567 | SmallVectorImpl<Value *> &Ops, |
8568 | unsigned IntID) { |
8569 | auto *ResultTy = getSVEType(TypeFlags); |
8570 | auto *OverloadedTy = |
8571 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy); |
8572 | |
8573 | |
8574 | |
8575 | |
8576 | |
8577 | |
8578 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8579 | |
8580 | Function *F = nullptr; |
8581 | if (Ops[1]->getType()->isVectorTy()) |
8582 | |
8583 | |
8584 | |
8585 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()}); |
8586 | else |
8587 | |
8588 | |
8589 | |
8590 | |
8591 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8592 | |
8593 | |
8594 | |
8595 | |
8596 | if (Ops.size() == 2) { |
8597 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset"); |
8598 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8599 | } |
8600 | |
8601 | |
8602 | |
8603 | if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) { |
8604 | unsigned BytesPerElt = |
8605 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8606 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8607 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8608 | } |
8609 | |
8610 | Value *Call = Builder.CreateCall(F, Ops); |
8611 | |
8612 | |
8613 | |
8614 | return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy) |
8615 | : Builder.CreateSExt(Call, ResultTy); |
8616 | } |
8617 | |
8618 | Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags, |
8619 | SmallVectorImpl<Value *> &Ops, |
8620 | unsigned IntID) { |
8621 | auto *SrcDataTy = getSVEType(TypeFlags); |
8622 | auto *OverloadedTy = |
8623 | llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy); |
8624 | |
8625 | |
8626 | |
8627 | Ops.insert(Ops.begin(), Ops.pop_back_val()); |
8628 | |
8629 | Function *F = nullptr; |
8630 | if (Ops[2]->getType()->isVectorTy()) |
8631 | |
8632 | |
8633 | |
8634 | F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()}); |
8635 | else |
8636 | |
8637 | |
8638 | |
8639 | |
8640 | F = CGM.getIntrinsic(IntID, OverloadedTy); |
8641 | |
8642 | |
8643 | |
8644 | |
8645 | if (Ops.size() == 3) { |
8646 | assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset"); |
8647 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8648 | } |
8649 | |
8650 | |
8651 | |
8652 | Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy); |
8653 | |
8654 | |
8655 | |
8656 | |
8657 | |
8658 | |
8659 | Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy); |
8660 | |
8661 | |
8662 | |
8663 | if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) { |
8664 | unsigned BytesPerElt = |
8665 | OverloadedTy->getElementType()->getScalarSizeInBits() / 8; |
8666 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8667 | Ops[3] = Builder.CreateMul(Ops[3], Scale); |
8668 | } |
8669 | |
8670 | return Builder.CreateCall(F, Ops); |
8671 | } |
8672 | |
8673 | Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags, |
8674 | SmallVectorImpl<Value *> &Ops, |
8675 | unsigned IntID) { |
8676 | |
8677 | |
8678 | auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType()); |
8679 | if (!OverloadedTy) |
8680 | OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType()); |
8681 | |
8682 | |
8683 | Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy); |
8684 | |
8685 | |
8686 | if (Ops[1]->getType()->isVectorTy()) { |
8687 | if (Ops.size() == 3) { |
8688 | |
8689 | Ops.push_back(ConstantInt::get(Int64Ty, 0)); |
8690 | |
8691 | |
8692 | std::swap(Ops[2], Ops[3]); |
8693 | } else { |
8694 | |
8695 | llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
8696 | unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8; |
8697 | Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt); |
8698 | Ops[2] = Builder.CreateMul(Ops[2], Scale); |
8699 | } |
8700 | } |
8701 | |
8702 | Function *F = CGM.getIntrinsic(IntID, OverloadedTy); |
8703 | return Builder.CreateCall(F, Ops); |
8704 | } |
8705 | |
8706 | Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags, |
8707 | SmallVectorImpl<Value*> &Ops, |
8708 | unsigned IntID) { |
8709 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8710 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8711 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8712 | |
8713 | unsigned N; |
8714 | switch (IntID) { |
8715 | case Intrinsic::aarch64_sve_ld2: |
8716 | N = 2; |
8717 | break; |
8718 | case Intrinsic::aarch64_sve_ld3: |
8719 | N = 3; |
8720 | break; |
8721 | case Intrinsic::aarch64_sve_ld4: |
8722 | N = 4; |
8723 | break; |
8724 | default: |
8725 | llvm_unreachable("unknown intrinsic!"); |
8726 | } |
8727 | auto RetTy = llvm::VectorType::get(VTy->getElementType(), |
8728 | VTy->getElementCount() * N); |
8729 | |
8730 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8731 | Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy); |
8732 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
8733 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8734 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8735 | |
8736 | Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()}); |
8737 | return Builder.CreateCall(F, { Predicate, BasePtr }); |
8738 | } |
8739 | |
8740 | Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags, |
8741 | SmallVectorImpl<Value*> &Ops, |
8742 | unsigned IntID) { |
8743 | llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); |
8744 | auto VecPtrTy = llvm::PointerType::getUnqual(VTy); |
8745 | auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType()); |
8746 | |
8747 | unsigned N; |
8748 | switch (IntID) { |
8749 | case Intrinsic::aarch64_sve_st2: |
8750 | N = 2; |
8751 | break; |
8752 | case Intrinsic::aarch64_sve_st3: |
8753 | N = 3; |
8754 | break; |
8755 | case Intrinsic::aarch64_sve_st4: |
8756 | N = 4; |
8757 | break; |
8758 | default: |
8759 | llvm_unreachable("unknown intrinsic!"); |
8760 | } |
8761 | auto TupleTy = |
8762 | llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N); |
8763 | |
8764 | Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); |
8765 | Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy); |
8766 | Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0); |
8767 | Value *Val = Ops.back(); |
8768 | BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); |
8769 | BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); |
8770 | |
8771 | |
8772 | |
8773 | SmallVector<llvm::Value*, 5> Operands; |
8774 | Function *FExtr = |
8775 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
8776 | for (unsigned I = 0; I < N; ++I) |
8777 | Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)})); |
8778 | Operands.append({Predicate, BasePtr}); |
8779 | |
8780 | Function *F = CGM.getIntrinsic(IntID, { VTy }); |
8781 | return Builder.CreateCall(F, Operands); |
8782 | } |
8783 | |
8784 | |
8785 | |
8786 | |
8787 | Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags, |
8788 | SmallVectorImpl<Value *> &Ops, |
8789 | unsigned BuiltinID) { |
8790 | |
8791 | if (TypeFlags.hasSplatOperand()) { |
8792 | unsigned OpNo = TypeFlags.getSplatOperand(); |
8793 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
8794 | } |
8795 | |
8796 | |
8797 | Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType()); |
8798 | Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]}); |
8799 | |
8800 | |
8801 | llvm::ScalableVectorType *Ty = getSVEType(TypeFlags); |
8802 | return EmitSVEReinterpret(Call, Ty); |
8803 | } |
8804 | |
8805 | Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags, |
8806 | ArrayRef<Value *> Ops, unsigned BuiltinID) { |
8807 | llvm::Type *OverloadedTy = getSVEType(TypeFlags); |
8808 | Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy); |
8809 | return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)}); |
8810 | } |
8811 | |
8812 | Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags, |
8813 | SmallVectorImpl<Value *> &Ops, |
8814 | unsigned BuiltinID) { |
8815 | auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags); |
8816 | auto *VectorTy = getSVEVectorForElementType(MemEltTy); |
8817 | auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8818 | |
8819 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8820 | Value *BasePtr = Ops[1]; |
8821 | |
8822 | |
8823 | if (Ops.size() > 3) { |
8824 | BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo()); |
8825 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]); |
8826 | } |
8827 | |
8828 | |
8829 | BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty)); |
8830 | Value *PrfOp = Ops.back(); |
8831 | |
8832 | Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType()); |
8833 | return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp}); |
8834 | } |
8835 | |
8836 | Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E, |
8837 | llvm::Type *ReturnTy, |
8838 | SmallVectorImpl<Value *> &Ops, |
8839 | unsigned BuiltinID, |
8840 | bool IsZExtReturn) { |
8841 | QualType LangPTy = E->getArg(1)->getType(); |
8842 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
8843 | LangPTy->castAs<PointerType>()->getPointeeType()); |
8844 | |
8845 | |
8846 | |
8847 | auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy); |
8848 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8849 | |
8850 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8851 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
8852 | Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); |
8853 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
8854 | |
8855 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
8856 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
8857 | Value *Load = Builder.CreateCall(F, {Predicate, BasePtr}); |
8858 | |
8859 | return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy) |
8860 | : Builder.CreateSExt(Load, VectorTy); |
8861 | } |
8862 | |
8863 | Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E, |
8864 | SmallVectorImpl<Value *> &Ops, |
8865 | unsigned BuiltinID) { |
8866 | QualType LangPTy = E->getArg(1)->getType(); |
8867 | llvm::Type *MemEltTy = CGM.getTypes().ConvertType( |
8868 | LangPTy->castAs<PointerType>()->getPointeeType()); |
8869 | |
8870 | |
8871 | |
8872 | auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType()); |
8873 | auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy); |
8874 | |
8875 | Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); |
8876 | Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo()); |
8877 | Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0); |
8878 | BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset); |
8879 | |
8880 | |
8881 | llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy); |
8882 | |
8883 | BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo()); |
8884 | Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy); |
8885 | return Builder.CreateCall(F, {Val, Predicate, BasePtr}); |
8886 | } |
8887 | |
8888 | |
8889 | |
8890 | Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) { |
8891 | auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty); |
8892 | return Builder.CreateCall(F, Scalar); |
8893 | } |
8894 | |
8895 | Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { |
8896 | return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType())); |
8897 | } |
8898 | |
8899 | Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) { |
8900 | |
8901 | |
8902 | |
8903 | |
8904 | |
8905 | |
8906 | return Builder.CreateBitCast(Val, Ty); |
8907 | } |
8908 | |
8909 | static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
8910 | SmallVectorImpl<Value *> &Ops) { |
8911 | auto *SplatZero = Constant::getNullValue(Ty); |
8912 | Ops.insert(Ops.begin(), SplatZero); |
8913 | } |
8914 | |
8915 | static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty, |
8916 | SmallVectorImpl<Value *> &Ops) { |
8917 | auto *SplatUndef = UndefValue::get(Ty); |
8918 | Ops.insert(Ops.begin(), SplatUndef); |
8919 | } |
8920 | |
8921 | SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes( |
8922 | SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) { |
8923 | if (TypeFlags.isOverloadNone()) |
8924 | return {}; |
8925 | |
8926 | llvm::Type *DefaultType = getSVEType(TypeFlags); |
8927 | |
8928 | if (TypeFlags.isOverloadWhile()) |
8929 | return {DefaultType, Ops[1]->getType()}; |
8930 | |
8931 | if (TypeFlags.isOverloadWhileRW()) |
8932 | return {getSVEPredType(TypeFlags), Ops[0]->getType()}; |
8933 | |
8934 | if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet()) |
8935 | return {Ops[0]->getType(), Ops.back()->getType()}; |
8936 | |
8937 | if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet()) |
8938 | return {ResultType, Ops[0]->getType()}; |
8939 | |
8940 | assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads"); |
8941 | return {DefaultType}; |
8942 | } |
8943 | |
8944 | Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, |
8945 | const CallExpr *E) { |
8946 | |
8947 | unsigned ICEArguments = 0; |
8948 | ASTContext::GetBuiltinTypeError Error; |
8949 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
8950 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
8951 | |
8952 | llvm::Type *Ty = ConvertType(E->getType()); |
8953 | if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && |
8954 | BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) { |
8955 | Value *Val = EmitScalarExpr(E->getArg(0)); |
8956 | return EmitSVEReinterpret(Val, Ty); |
8957 | } |
8958 | |
8959 | llvm::SmallVector<Value *, 4> Ops; |
8960 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
8961 | if ((ICEArguments & (1 << i)) == 0) |
8962 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
8963 | else { |
8964 | |
8965 | |
8966 | Optional<llvm::APSInt> Result = |
8967 | E->getArg(i)->getIntegerConstantExpr(getContext()); |
8968 | assert(Result && "Expected argument to be a constant"); |
8969 | |
8970 | |
8971 | |
8972 | |
8973 | *Result = Result->extOrTrunc(32); |
8974 | Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result)); |
8975 | } |
8976 | } |
8977 | |
8978 | auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID, |
8979 | AArch64SVEIntrinsicsProvenSorted); |
8980 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
8981 | if (TypeFlags.isLoad()) |
8982 | return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic, |
8983 | TypeFlags.isZExtReturn()); |
8984 | else if (TypeFlags.isStore()) |
8985 | return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic); |
8986 | else if (TypeFlags.isGatherLoad()) |
8987 | return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8988 | else if (TypeFlags.isScatterStore()) |
8989 | return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8990 | else if (TypeFlags.isPrefetch()) |
8991 | return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8992 | else if (TypeFlags.isGatherPrefetch()) |
8993 | return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8994 | else if (TypeFlags.isStructLoad()) |
8995 | return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8996 | else if (TypeFlags.isStructStore()) |
8997 | return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); |
8998 | else if (TypeFlags.isUndef()) |
8999 | return UndefValue::get(Ty); |
9000 | else if (Builtin->LLVMIntrinsic != 0) { |
9001 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp) |
9002 | InsertExplicitZeroOperand(Builder, Ty, Ops); |
9003 | |
9004 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp) |
9005 | InsertExplicitUndefOperand(Builder, Ty, Ops); |
9006 | |
9007 | |
9008 | |
9009 | if (TypeFlags.isAppendSVALL()) |
9010 | Ops.push_back(Builder.getInt32( 31)); |
9011 | if (TypeFlags.isInsertOp1SVALL()) |
9012 | Ops.insert(&Ops[1], Builder.getInt32( 31)); |
9013 | |
9014 | |
9015 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
9016 | if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType())) |
9017 | if (PredTy->getElementType()->isIntegerTy(1)) |
9018 | Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags)); |
9019 | |
9020 | |
9021 | if (TypeFlags.hasSplatOperand()) { |
9022 | unsigned OpNo = TypeFlags.getSplatOperand(); |
9023 | Ops[OpNo] = EmitSVEDupX(Ops[OpNo]); |
9024 | } |
9025 | |
9026 | if (TypeFlags.isReverseCompare()) |
9027 | std::swap(Ops[1], Ops[2]); |
9028 | |
9029 | if (TypeFlags.isReverseUSDOT()) |
9030 | std::swap(Ops[1], Ops[2]); |
9031 | |
9032 | |
9033 | if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) { |
9034 | llvm::Type *OpndTy = Ops[1]->getType(); |
9035 | auto *SplatZero = Constant::getNullValue(OpndTy); |
9036 | Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy); |
9037 | Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero}); |
9038 | } |
9039 | |
9040 | Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, |
9041 | getSVEOverloadTypes(TypeFlags, Ty, Ops)); |
9042 | Value *Call = Builder.CreateCall(F, Ops); |
9043 | |
9044 | |
9045 | if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType())) |
9046 | if (PredTy->getScalarType()->isIntegerTy(1)) |
9047 | Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9048 | |
9049 | return Call; |
9050 | } |
9051 | |
9052 | switch (BuiltinID) { |
9053 | default: |
9054 | return nullptr; |
9055 | |
9056 | case SVE::BI__builtin_sve_svmov_b_z: { |
9057 | |
9058 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9059 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9060 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy); |
9061 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]}); |
9062 | } |
9063 | |
9064 | case SVE::BI__builtin_sve_svnot_b_z: { |
9065 | |
9066 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9067 | llvm::Type* OverloadedTy = getSVEType(TypeFlags); |
9068 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy); |
9069 | return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]}); |
9070 | } |
9071 | |
9072 | case SVE::BI__builtin_sve_svmovlb_u16: |
9073 | case SVE::BI__builtin_sve_svmovlb_u32: |
9074 | case SVE::BI__builtin_sve_svmovlb_u64: |
9075 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb); |
9076 | |
9077 | case SVE::BI__builtin_sve_svmovlb_s16: |
9078 | case SVE::BI__builtin_sve_svmovlb_s32: |
9079 | case SVE::BI__builtin_sve_svmovlb_s64: |
9080 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb); |
9081 | |
9082 | case SVE::BI__builtin_sve_svmovlt_u16: |
9083 | case SVE::BI__builtin_sve_svmovlt_u32: |
9084 | case SVE::BI__builtin_sve_svmovlt_u64: |
9085 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt); |
9086 | |
9087 | case SVE::BI__builtin_sve_svmovlt_s16: |
9088 | case SVE::BI__builtin_sve_svmovlt_s32: |
9089 | case SVE::BI__builtin_sve_svmovlt_s64: |
9090 | return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt); |
9091 | |
9092 | case SVE::BI__builtin_sve_svpmullt_u16: |
9093 | case SVE::BI__builtin_sve_svpmullt_u64: |
9094 | case SVE::BI__builtin_sve_svpmullt_n_u16: |
9095 | case SVE::BI__builtin_sve_svpmullt_n_u64: |
9096 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair); |
9097 | |
9098 | case SVE::BI__builtin_sve_svpmullb_u16: |
9099 | case SVE::BI__builtin_sve_svpmullb_u64: |
9100 | case SVE::BI__builtin_sve_svpmullb_n_u16: |
9101 | case SVE::BI__builtin_sve_svpmullb_n_u64: |
9102 | return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair); |
9103 | |
9104 | case SVE::BI__builtin_sve_svdup_n_b8: |
9105 | case SVE::BI__builtin_sve_svdup_n_b16: |
9106 | case SVE::BI__builtin_sve_svdup_n_b32: |
9107 | case SVE::BI__builtin_sve_svdup_n_b64: { |
9108 | Value *CmpNE = |
9109 | Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType())); |
9110 | llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags); |
9111 | Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy); |
9112 | return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty)); |
9113 | } |
9114 | |
9115 | case SVE::BI__builtin_sve_svdupq_n_b8: |
9116 | case SVE::BI__builtin_sve_svdupq_n_b16: |
9117 | case SVE::BI__builtin_sve_svdupq_n_b32: |
9118 | case SVE::BI__builtin_sve_svdupq_n_b64: |
9119 | case SVE::BI__builtin_sve_svdupq_n_u8: |
9120 | case SVE::BI__builtin_sve_svdupq_n_s8: |
9121 | case SVE::BI__builtin_sve_svdupq_n_u64: |
9122 | case SVE::BI__builtin_sve_svdupq_n_f64: |
9123 | case SVE::BI__builtin_sve_svdupq_n_s64: |
9124 | case SVE::BI__builtin_sve_svdupq_n_u16: |
9125 | case SVE::BI__builtin_sve_svdupq_n_f16: |
9126 | case SVE::BI__builtin_sve_svdupq_n_bf16: |
9127 | case SVE::BI__builtin_sve_svdupq_n_s16: |
9128 | case SVE::BI__builtin_sve_svdupq_n_u32: |
9129 | case SVE::BI__builtin_sve_svdupq_n_f32: |
9130 | case SVE::BI__builtin_sve_svdupq_n_s32: { |
9131 | |
9132 | |
9133 | unsigned NumOpnds = Ops.size(); |
9134 | |
9135 | bool IsBoolTy = |
9136 | cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1); |
9137 | |
9138 | |
9139 | |
9140 | |
9141 | llvm::Type *EltTy = Ops[0]->getType(); |
9142 | if (IsBoolTy) |
9143 | EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds); |
9144 | |
9145 | SmallVector<llvm::Value *, 16> VecOps; |
9146 | for (unsigned I = 0; I < NumOpnds; ++I) |
9147 | VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy)); |
9148 | Value *Vec = BuildVector(VecOps); |
9149 | |
9150 | SVETypeFlags TypeFlags(Builtin->TypeModifier); |
9151 | Value *Pred = EmitSVEAllTruePred(TypeFlags); |
9152 | |
9153 | llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy); |
9154 | Value *InsertSubVec = Builder.CreateInsertVector( |
9155 | OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0)); |
9156 | |
9157 | Function *F = |
9158 | CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy); |
9159 | Value *DupQLane = |
9160 | Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)}); |
9161 | |
9162 | if (!IsBoolTy) |
9163 | return DupQLane; |
9164 | |
9165 | |
9166 | F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne |
9167 | : Intrinsic::aarch64_sve_cmpne_wide, |
9168 | OverloadedTy); |
9169 | Value *Call = Builder.CreateCall( |
9170 | F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))}); |
9171 | return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty)); |
9172 | } |
9173 | |
9174 | case SVE::BI__builtin_sve_svpfalse_b: |
9175 | return ConstantInt::getFalse(Ty); |
9176 | |
9177 | case SVE::BI__builtin_sve_svlen_bf16: |
9178 | case SVE::BI__builtin_sve_svlen_f16: |
9179 | case SVE::BI__builtin_sve_svlen_f32: |
9180 | case SVE::BI__builtin_sve_svlen_f64: |
9181 | case SVE::BI__builtin_sve_svlen_s8: |
9182 | case SVE::BI__builtin_sve_svlen_s16: |
9183 | case SVE::BI__builtin_sve_svlen_s32: |
9184 | case SVE::BI__builtin_sve_svlen_s64: |
9185 | case SVE::BI__builtin_sve_svlen_u8: |
9186 | case SVE::BI__builtin_sve_svlen_u16: |
9187 | case SVE::BI__builtin_sve_svlen_u32: |
9188 | case SVE::BI__builtin_sve_svlen_u64: { |
9189 | SVETypeFlags TF(Builtin->TypeModifier); |
9190 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9191 | auto *NumEls = |
9192 | llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue()); |
9193 | |
9194 | Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty); |
9195 | return Builder.CreateMul(NumEls, Builder.CreateCall(F)); |
9196 | } |
9197 | |
9198 | case SVE::BI__builtin_sve_svtbl2_u8: |
9199 | case SVE::BI__builtin_sve_svtbl2_s8: |
9200 | case SVE::BI__builtin_sve_svtbl2_u16: |
9201 | case SVE::BI__builtin_sve_svtbl2_s16: |
9202 | case SVE::BI__builtin_sve_svtbl2_u32: |
9203 | case SVE::BI__builtin_sve_svtbl2_s32: |
9204 | case SVE::BI__builtin_sve_svtbl2_u64: |
9205 | case SVE::BI__builtin_sve_svtbl2_s64: |
9206 | case SVE::BI__builtin_sve_svtbl2_f16: |
9207 | case SVE::BI__builtin_sve_svtbl2_bf16: |
9208 | case SVE::BI__builtin_sve_svtbl2_f32: |
9209 | case SVE::BI__builtin_sve_svtbl2_f64: { |
9210 | SVETypeFlags TF(Builtin->TypeModifier); |
9211 | auto VTy = cast<llvm::VectorType>(getSVEType(TF)); |
9212 | auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy); |
9213 | Function *FExtr = |
9214 | CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy}); |
9215 | Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)}); |
9216 | Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)}); |
9217 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy); |
9218 | return Builder.CreateCall(F, {V0, V1, Ops[1]}); |
9219 | } |
9220 | } |
9221 | |
9222 | |
9223 | return nullptr; |
9224 | } |
9225 | |
9226 | Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, |
9227 | const CallExpr *E, |
9228 | llvm::Triple::ArchType Arch) { |
9229 | if (BuiltinID >= AArch64::FirstSVEBuiltin && |
9230 | BuiltinID <= AArch64::LastSVEBuiltin) |
9231 | return EmitAArch64SVEBuiltinExpr(BuiltinID, E); |
9232 | |
9233 | unsigned HintID = static_cast<unsigned>(-1); |
9234 | switch (BuiltinID) { |
9235 | default: break; |
9236 | case AArch64::BI__builtin_arm_nop: |
9237 | HintID = 0; |
9238 | break; |
9239 | case AArch64::BI__builtin_arm_yield: |
9240 | case AArch64::BI__yield: |
9241 | HintID = 1; |
9242 | break; |
9243 | case AArch64::BI__builtin_arm_wfe: |
9244 | case AArch64::BI__wfe: |
9245 | HintID = 2; |
9246 | break; |
9247 | case AArch64::BI__builtin_arm_wfi: |
9248 | case AArch64::BI__wfi: |
9249 | HintID = 3; |
9250 | break; |
9251 | case AArch64::BI__builtin_arm_sev: |
9252 | case AArch64::BI__sev: |
9253 | HintID = 4; |
9254 | break; |
9255 | case AArch64::BI__builtin_arm_sevl: |
9256 | case AArch64::BI__sevl: |
9257 | HintID = 5; |
9258 | break; |
9259 | } |
9260 | |
9261 | if (HintID != static_cast<unsigned>(-1)) { |
9262 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint); |
9263 | return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID)); |
9264 | } |
9265 | |
9266 | if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { |
9267 | Value *Address = EmitScalarExpr(E->getArg(0)); |
9268 | Value *RW = EmitScalarExpr(E->getArg(1)); |
9269 | Value *CacheLevel = EmitScalarExpr(E->getArg(2)); |
9270 | Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); |
9271 | Value *IsData = EmitScalarExpr(E->getArg(4)); |
9272 | |
9273 | Value *Locality = nullptr; |
9274 | if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) { |
9275 | |
9276 | Locality = llvm::ConstantInt::get(Int32Ty, |
9277 | -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3); |
9278 | } else { |
9279 | |
9280 | Locality = llvm::ConstantInt::get(Int32Ty, 0); |
9281 | } |
9282 | |
9283 | |
9284 | |
9285 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
9286 | return Builder.CreateCall(F, {Address, RW, Locality, IsData}); |
9287 | } |
9288 | |
9289 | if (BuiltinID == AArch64::BI__builtin_arm_rbit) { |
9290 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9291 | "rbit of unusual size!"); |
9292 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9293 | return Builder.CreateCall( |
9294 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9295 | } |
9296 | if (BuiltinID == AArch64::BI__builtin_arm_rbit64) { |
9297 | assert((getContext().getTypeSize(E->getType()) == 64) && |
9298 | "rbit of unusual size!"); |
9299 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9300 | return Builder.CreateCall( |
9301 | CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); |
9302 | } |
9303 | |
9304 | if (BuiltinID == AArch64::BI__builtin_arm_cls) { |
9305 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9306 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg, |
9307 | "cls"); |
9308 | } |
9309 | if (BuiltinID == AArch64::BI__builtin_arm_cls64) { |
9310 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9311 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg, |
9312 | "cls"); |
9313 | } |
9314 | |
9315 | if (BuiltinID == AArch64::BI__builtin_arm_frint32zf || |
9316 | BuiltinID == AArch64::BI__builtin_arm_frint32z) { |
9317 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9318 | llvm::Type *Ty = Arg->getType(); |
9319 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty), |
9320 | Arg, "frint32z"); |
9321 | } |
9322 | |
9323 | if (BuiltinID == AArch64::BI__builtin_arm_frint64zf || |
9324 | BuiltinID == AArch64::BI__builtin_arm_frint64z) { |
9325 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9326 | llvm::Type *Ty = Arg->getType(); |
9327 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty), |
9328 | Arg, "frint64z"); |
9329 | } |
9330 | |
9331 | if (BuiltinID == AArch64::BI__builtin_arm_frint32xf || |
9332 | BuiltinID == AArch64::BI__builtin_arm_frint32x) { |
9333 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9334 | llvm::Type *Ty = Arg->getType(); |
9335 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty), |
9336 | Arg, "frint32x"); |
9337 | } |
9338 | |
9339 | if (BuiltinID == AArch64::BI__builtin_arm_frint64xf || |
9340 | BuiltinID == AArch64::BI__builtin_arm_frint64x) { |
9341 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9342 | llvm::Type *Ty = Arg->getType(); |
9343 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty), |
9344 | Arg, "frint64x"); |
9345 | } |
9346 | |
9347 | if (BuiltinID == AArch64::BI__builtin_arm_jcvt) { |
9348 | assert((getContext().getTypeSize(E->getType()) == 32) && |
9349 | "__jcvt of unusual size!"); |
9350 | llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); |
9351 | return Builder.CreateCall( |
9352 | CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg); |
9353 | } |
9354 | |
9355 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b || |
9356 | BuiltinID == AArch64::BI__builtin_arm_st64b || |
9357 | BuiltinID == AArch64::BI__builtin_arm_st64bv || |
9358 | BuiltinID == AArch64::BI__builtin_arm_st64bv0) { |
9359 | llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0)); |
9360 | llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1)); |
9361 | |
9362 | if (BuiltinID == AArch64::BI__builtin_arm_ld64b) { |
9363 | |
9364 | |
9365 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b); |
9366 | llvm::Value *Val = Builder.CreateCall(F, MemAddr); |
9367 | llvm::Value *ToRet; |
9368 | for (size_t i = 0; i < 8; i++) { |
9369 | llvm::Value *ValOffsetPtr = |
9370 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9371 | Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8)); |
9372 | ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr); |
9373 | } |
9374 | return ToRet; |
9375 | } else { |
9376 | |
9377 | |
9378 | SmallVector<llvm::Value *, 9> Args; |
9379 | Args.push_back(MemAddr); |
9380 | for (size_t i = 0; i < 8; i++) { |
9381 | llvm::Value *ValOffsetPtr = |
9382 | Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i)); |
9383 | Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8)); |
9384 | Args.push_back(Builder.CreateLoad(Addr)); |
9385 | } |
9386 | |
9387 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b |
9388 | ? Intrinsic::aarch64_st64b |
9389 | : BuiltinID == AArch64::BI__builtin_arm_st64bv |
9390 | ? Intrinsic::aarch64_st64bv |
9391 | : Intrinsic::aarch64_st64bv0); |
9392 | Function *F = CGM.getIntrinsic(Intr); |
9393 | return Builder.CreateCall(F, Args); |
9394 | } |
9395 | } |
9396 | |
9397 | if (BuiltinID == AArch64::BI__builtin_arm_rndr || |
9398 | BuiltinID == AArch64::BI__builtin_arm_rndrrs) { |
9399 | |
9400 | auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr |
9401 | ? Intrinsic::aarch64_rndr |
9402 | : Intrinsic::aarch64_rndrrs); |
9403 | Function *F = CGM.getIntrinsic(Intr); |
9404 | llvm::Value *Val = Builder.CreateCall(F); |
9405 | Value *RandomValue = Builder.CreateExtractValue(Val, 0); |
9406 | Value *Status = Builder.CreateExtractValue(Val, 1); |
9407 | |
9408 | Address MemAddress = EmitPointerWithAlignment(E->getArg(0)); |
9409 | Builder.CreateStore(RandomValue, MemAddress); |
9410 | Status = Builder.CreateZExt(Status, Int32Ty); |
9411 | return Status; |
9412 | } |
9413 | |
9414 | if (BuiltinID == AArch64::BI__clear_cache) { |
9415 | assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); |
9416 | const FunctionDecl *FD = E->getDirectCallee(); |
9417 | Value *Ops[2]; |
9418 | for (unsigned i = 0; i < 2; i++) |
9419 | Ops[i] = EmitScalarExpr(E->getArg(i)); |
9420 | llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); |
9421 | llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); |
9422 | StringRef Name = FD->getName(); |
9423 | return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops); |
9424 | } |
9425 | |
9426 | if ((BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9427 | BuiltinID == AArch64::BI__builtin_arm_ldaex) && |
9428 | getContext().getTypeSize(E->getType()) == 128) { |
9429 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9430 | ? Intrinsic::aarch64_ldaxp |
9431 | : Intrinsic::aarch64_ldxp); |
9432 | |
9433 | Value *LdPtr = EmitScalarExpr(E->getArg(0)); |
9434 | Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), |
9435 | "ldxp"); |
9436 | |
9437 | Value *Val0 = Builder.CreateExtractValue(Val, 1); |
9438 | Value *Val1 = Builder.CreateExtractValue(Val, 0); |
9439 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
9440 | Val0 = Builder.CreateZExt(Val0, Int128Ty); |
9441 | Val1 = Builder.CreateZExt(Val1, Int128Ty); |
9442 | |
9443 | Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64); |
9444 | Val = Builder.CreateShl(Val0, ShiftCst, "shl", true ); |
9445 | Val = Builder.CreateOr(Val, Val1); |
9446 | return Builder.CreateBitCast(Val, ConvertType(E->getType())); |
9447 | } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex || |
9448 | BuiltinID == AArch64::BI__builtin_arm_ldaex) { |
9449 | Value *LoadAddr = EmitScalarExpr(E->getArg(0)); |
9450 | |
9451 | QualType Ty = E->getType(); |
9452 | llvm::Type *RealResTy = ConvertType(Ty); |
9453 | llvm::Type *PtrTy = llvm::IntegerType::get( |
9454 | getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(); |
9455 | LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy); |
9456 | |
9457 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex |
9458 | ? Intrinsic::aarch64_ldaxr |
9459 | : Intrinsic::aarch64_ldxr, |
9460 | PtrTy); |
9461 | Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr"); |
9462 | |
9463 | if (RealResTy->isPointerTy()) |
9464 | return Builder.CreateIntToPtr(Val, RealResTy); |
9465 | |
9466 | llvm::Type *IntResTy = llvm::IntegerType::get( |
9467 | getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy)); |
9468 | Val = Builder.CreateTruncOrBitCast(Val, IntResTy); |
9469 | return Builder.CreateBitCast(Val, RealResTy); |
9470 | } |
9471 | |
9472 | if ((BuiltinID == AArch64::BI__builtin_arm_strex || |
9473 | BuiltinID == AArch64::BI__builtin_arm_stlex) && |
9474 | getContext().getTypeSize(E->getArg(0)->getType()) == 128) { |
9475 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9476 | ? Intrinsic::aarch64_stlxp |
9477 | : Intrinsic::aarch64_stxp); |
9478 | llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty); |
9479 | |
9480 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
9481 | EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), true); |
9482 | |
9483 | Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy)); |
9484 | llvm::Value *Val = Builder.CreateLoad(Tmp); |
9485 | |
9486 | Value *Arg0 = Builder.CreateExtractValue(Val, 0); |
9487 | Value *Arg1 = Builder.CreateExtractValue(Val, 1); |
9488 | Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), |
9489 | Int8PtrTy); |
9490 | return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); |
9491 | } |
9492 | |
9493 | if (BuiltinID == AArch64::BI__builtin_arm_strex || |
9494 | BuiltinID == AArch64::BI__builtin_arm_stlex) { |
9495 | Value *StoreVal = EmitScalarExpr(E->getArg(0)); |
9496 | Value *StoreAddr = EmitScalarExpr(E->getArg(1)); |
9497 | |
9498 | QualType Ty = E->getArg(0)->getType(); |
9499 | llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), |
9500 | getContext().getTypeSize(Ty)); |
9501 | StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); |
9502 | |
9503 | if (StoreVal->getType()->isPointerTy()) |
9504 | StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty); |
9505 | else { |
9506 | llvm::Type *IntTy = llvm::IntegerType::get( |
9507 | getLLVMContext(), |
9508 | CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); |
9509 | StoreVal = Builder.CreateBitCast(StoreVal, IntTy); |
9510 | StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty); |
9511 | } |
9512 | |
9513 | Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex |
9514 | ? Intrinsic::aarch64_stlxr |
9515 | : Intrinsic::aarch64_stxr, |
9516 | StoreAddr->getType()); |
9517 | return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); |
9518 | } |
9519 | |
9520 | if (BuiltinID == AArch64::BI__getReg) { |
9521 | Expr::EvalResult Result; |
9522 | if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) |
9523 | llvm_unreachable("Sema will ensure that the parameter is constant"); |
9524 | |
9525 | llvm::APSInt Value = Result.Val.getInt(); |
9526 | LLVMContext &Context = CGM.getLLVMContext(); |
9527 | std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10); |
9528 | |
9529 | llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)}; |
9530 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
9531 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
9532 | |
9533 | llvm::Function *F = |
9534 | CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty}); |
9535 | return Builder.CreateCall(F, Metadata); |
9536 | } |
9537 | |
9538 | if (BuiltinID == AArch64::BI__builtin_arm_clrex) { |
9539 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); |
9540 | return Builder.CreateCall(F); |
9541 | } |
9542 | |
9543 | if (BuiltinID == AArch64::BI_ReadWriteBarrier) |
9544 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
9545 | llvm::SyncScope::SingleThread); |
9546 | |
9547 | |
9548 | Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; |
9549 | switch (BuiltinID) { |
9550 | case AArch64::BI__builtin_arm_crc32b: |
9551 | CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; |
9552 | case AArch64::BI__builtin_arm_crc32cb: |
9553 | CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; |
9554 | case AArch64::BI__builtin_arm_crc32h: |
9555 | CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; |
9556 | case AArch64::BI__builtin_arm_crc32ch: |
9557 | CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break; |
9558 | case AArch64::BI__builtin_arm_crc32w: |
9559 | CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; |
9560 | case AArch64::BI__builtin_arm_crc32cw: |
9561 | CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; |
9562 | case AArch64::BI__builtin_arm_crc32d: |
9563 | CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; |
9564 | case AArch64::BI__builtin_arm_crc32cd: |
9565 | CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; |
9566 | } |
9567 | |
9568 | if (CRCIntrinsicID != Intrinsic::not_intrinsic) { |
9569 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
9570 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
9571 | Function *F = CGM.getIntrinsic(CRCIntrinsicID); |
9572 | |
9573 | llvm::Type *DataTy = F->getFunctionType()->getParamType(1); |
9574 | Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); |
9575 | |
9576 | return Builder.CreateCall(F, {Arg0, Arg1}); |
9577 | } |
9578 | |
9579 | |
9580 | Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; |
9581 | switch (BuiltinID) { |
9582 | case AArch64::BI__builtin_arm_irg: |
9583 | MTEIntrinsicID = Intrinsic::aarch64_irg; break; |
9584 | case AArch64::BI__builtin_arm_addg: |
9585 | MTEIntrinsicID = Intrinsic::aarch64_addg; break; |
9586 | case AArch64::BI__builtin_arm_gmi: |
9587 | MTEIntrinsicID = Intrinsic::aarch64_gmi; break; |
9588 | case AArch64::BI__builtin_arm_ldg: |
9589 | MTEIntrinsicID = Intrinsic::aarch64_ldg; break; |
9590 | case AArch64::BI__builtin_arm_stg: |
9591 | MTEIntrinsicID = Intrinsic::aarch64_stg; break; |
9592 | case AArch64::BI__builtin_arm_subp: |
9593 | MTEIntrinsicID = Intrinsic::aarch64_subp; break; |
9594 | } |
9595 | |
9596 | if (MTEIntrinsicID != Intrinsic::not_intrinsic) { |
9597 | llvm::Type *T = ConvertType(E->getType()); |
9598 | |
9599 | if (MTEIntrinsicID == Intrinsic::aarch64_irg) { |
9600 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9601 | Value *Mask = EmitScalarExpr(E->getArg(1)); |
9602 | |
9603 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9604 | Mask = Builder.CreateZExt(Mask, Int64Ty); |
9605 | Value *RV = Builder.CreateCall( |
9606 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); |
9607 | return Builder.CreatePointerCast(RV, T); |
9608 | } |
9609 | if (MTEIntrinsicID == Intrinsic::aarch64_addg) { |
9610 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9611 | Value *TagOffset = EmitScalarExpr(E->getArg(1)); |
9612 | |
9613 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9614 | TagOffset = Builder.CreateZExt(TagOffset, Int64Ty); |
9615 | Value *RV = Builder.CreateCall( |
9616 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset}); |
9617 | return Builder.CreatePointerCast(RV, T); |
9618 | } |
9619 | if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { |
9620 | Value *Pointer = EmitScalarExpr(E->getArg(0)); |
9621 | Value *ExcludedMask = EmitScalarExpr(E->getArg(1)); |
9622 | |
9623 | ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty); |
9624 | Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); |
9625 | return Builder.CreateCall( |
9626 | CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask}); |
9627 | } |
9628 | |
9629 | |
9630 | |
9631 | if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { |
9632 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9633 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9634 | Value *RV = Builder.CreateCall( |
9635 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9636 | return Builder.CreatePointerCast(RV, T); |
9637 | } |
9638 | |
9639 | |
9640 | |
9641 | if (MTEIntrinsicID == Intrinsic::aarch64_stg) { |
9642 | Value *TagAddress = EmitScalarExpr(E->getArg(0)); |
9643 | TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); |
9644 | return Builder.CreateCall( |
9645 | CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); |
9646 | } |
9647 | if (MTEIntrinsicID == Intrinsic::aarch64_subp) { |
9648 | Value *PointerA = EmitScalarExpr(E->getArg(0)); |
9649 | Value *PointerB = EmitScalarExpr(E->getArg(1)); |
9650 | PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy); |
9651 | PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy); |
9652 | return Builder.CreateCall( |
9653 | CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB}); |
9654 | } |
9655 | } |
9656 | |
9657 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9658 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9659 | BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9660 | BuiltinID == AArch64::BI__builtin_arm_wsr || |
9661 | BuiltinID == AArch64::BI__builtin_arm_wsr64 || |
9662 | BuiltinID == AArch64::BI__builtin_arm_wsrp) { |
9663 | |
9664 | SpecialRegisterAccessKind AccessKind = Write; |
9665 | if (BuiltinID == AArch64::BI__builtin_arm_rsr || |
9666 | BuiltinID == AArch64::BI__builtin_arm_rsr64 || |
9667 | BuiltinID == AArch64::BI__builtin_arm_rsrp) |
9668 | AccessKind = VolatileRead; |
9669 | |
9670 | bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp || |
9671 | BuiltinID == AArch64::BI__builtin_arm_wsrp; |
9672 | |
9673 | bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr && |
9674 | BuiltinID != AArch64::BI__builtin_arm_wsr; |
9675 | |
9676 | llvm::Type *ValueType; |
9677 | llvm::Type *RegisterType = Int64Ty; |
9678 | if (IsPointerBuiltin) { |
9679 | ValueType = VoidPtrTy; |
9680 | } else if (Is64Bit) { |
9681 | ValueType = Int64Ty; |
9682 | } else { |
9683 | ValueType = Int32Ty; |
9684 | } |
9685 | |
9686 | return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, |
9687 | AccessKind); |
9688 | } |
9689 | |
9690 | if (BuiltinID == AArch64::BI_ReadStatusReg || |
9691 | BuiltinID == AArch64::BI_WriteStatusReg) { |
9692 | LLVMContext &Context = CGM.getLLVMContext(); |
9693 | |
9694 | unsigned SysReg = |
9695 | E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); |
9696 | |
9697 | std::string SysRegStr; |
9698 | llvm::raw_string_ostream(SysRegStr) << |
9699 | ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << |
9700 | ((SysReg >> 11) & 7) << ":" << |
9701 | ((SysReg >> 7) & 15) << ":" << |
9702 | ((SysReg >> 3) & 15) << ":" << |
9703 | ( SysReg & 7); |
9704 | |
9705 | llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) }; |
9706 | llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); |
9707 | llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); |
9708 | |
9709 | llvm::Type *RegisterType = Int64Ty; |
9710 | llvm::Type *Types[] = { RegisterType }; |
9711 | |
9712 | if (BuiltinID == AArch64::BI_ReadStatusReg) { |
9713 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); |
9714 | |
9715 | return Builder.CreateCall(F, Metadata); |
9716 | } |
9717 | |
9718 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); |
9719 | llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); |
9720 | |
9721 | return Builder.CreateCall(F, { Metadata, ArgValue }); |
9722 | } |
9723 | |
9724 | if (BuiltinID == AArch64::BI_AddressOfReturnAddress) { |
9725 | llvm::Function *F = |
9726 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
9727 | return Builder.CreateCall(F); |
9728 | } |
9729 | |
9730 | if (BuiltinID == AArch64::BI__builtin_sponentry) { |
9731 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy); |
9732 | return Builder.CreateCall(F); |
9733 | } |
9734 | |
9735 | if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) { |
9736 | llvm::Type *ResType = ConvertType(E->getType()); |
9737 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
9738 | |
9739 | bool IsSigned = BuiltinID == AArch64::BI__mulh; |
9740 | Value *LHS = |
9741 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned); |
9742 | Value *RHS = |
9743 | Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned); |
9744 | |
9745 | Value *MulResult, *HigherBits; |
9746 | if (IsSigned) { |
9747 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
9748 | HigherBits = Builder.CreateAShr(MulResult, 64); |
9749 | } else { |
9750 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
9751 | HigherBits = Builder.CreateLShr(MulResult, 64); |
9752 | } |
9753 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
9754 | |
9755 | return HigherBits; |
9756 | } |
9757 | |
9758 | |
9759 | |
9760 | if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID)) |
9761 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
9762 | |
9763 | |
9764 | |
9765 | unsigned ICEArguments = 0; |
9766 | ASTContext::GetBuiltinTypeError Error; |
9767 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
9768 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
9769 | |
9770 | llvm::SmallVector<Value*, 4> Ops; |
9771 | Address PtrOp0 = Address::invalid(); |
9772 | for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { |
9773 | if (i == 0) { |
9774 | switch (BuiltinID) { |
9775 | case NEON::BI__builtin_neon_vld1_v: |
9776 | case NEON::BI__builtin_neon_vld1q_v: |
9777 | case NEON::BI__builtin_neon_vld1_dup_v: |
9778 | case NEON::BI__builtin_neon_vld1q_dup_v: |
9779 | case NEON::BI__builtin_neon_vld1_lane_v: |
9780 | case NEON::BI__builtin_neon_vld1q_lane_v: |
9781 | case NEON::BI__builtin_neon_vst1_v: |
9782 | case NEON::BI__builtin_neon_vst1q_v: |
9783 | case NEON::BI__builtin_neon_vst1_lane_v: |
9784 | case NEON::BI__builtin_neon_vst1q_lane_v: |
9785 | |
9786 | |
9787 | PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); |
9788 | Ops.push_back(PtrOp0.getPointer()); |
9789 | continue; |
9790 | } |
9791 | } |
9792 | if ((ICEArguments & (1 << i)) == 0) { |
9793 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
9794 | } else { |
9795 | |
9796 | |
9797 | Ops.push_back(llvm::ConstantInt::get( |
9798 | getLLVMContext(), |
9799 | *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
9800 | } |
9801 | } |
9802 | |
9803 | auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap); |
9804 | const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( |
9805 | SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted); |
9806 | |
9807 | if (Builtin) { |
9808 | Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); |
9809 | Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E); |
9810 | assert(Result && "SISD intrinsic should have been handled"); |
9811 | return Result; |
9812 | } |
9813 | |
9814 | const Expr *Arg = E->getArg(E->getNumArgs()-1); |
9815 | NeonTypeFlags Type(0); |
9816 | if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext())) |
9817 | |
9818 | Type = NeonTypeFlags(Result->getZExtValue()); |
9819 | |
9820 | bool usgn = Type.isUnsigned(); |
9821 | bool quad = Type.isQuad(); |
9822 | |
9823 | |
9824 | switch (BuiltinID) { |
9825 | default: break; |
9826 | case NEON::BI__builtin_neon_vabsh_f16: |
9827 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9828 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); |
9829 | case NEON::BI__builtin_neon_vaddq_p128: { |
9830 | llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128); |
9831 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9832 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
9833 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
9834 | Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); |
9835 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
9836 | return Builder.CreateBitCast(Ops[0], Int128Ty); |
9837 | } |
9838 | case NEON::BI__builtin_neon_vldrq_p128: { |
9839 | llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); |
9840 | llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0); |
9841 | Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy); |
9842 | return Builder.CreateAlignedLoad(Int128Ty, Ptr, |
9843 | CharUnits::fromQuantity(16)); |
9844 | } |
9845 | case NEON::BI__builtin_neon_vstrq_p128: { |
9846 | llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128); |
9847 | Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy); |
9848 | return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); |
9849 | } |
9850 | case NEON::BI__builtin_neon_vcvts_f32_u32: |
9851 | case NEON::BI__builtin_neon_vcvtd_f64_u64: |
9852 | usgn = true; |
9853 | LLVM_FALLTHROUGH; |
9854 | case NEON::BI__builtin_neon_vcvts_f32_s32: |
9855 | case NEON::BI__builtin_neon_vcvtd_f64_s64: { |
9856 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9857 | bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; |
9858 | llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty; |
9859 | llvm::Type *FTy = Is64 ? DoubleTy : FloatTy; |
9860 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
9861 | if (usgn) |
9862 | return Builder.CreateUIToFP(Ops[0], FTy); |
9863 | return Builder.CreateSIToFP(Ops[0], FTy); |
9864 | } |
9865 | case NEON::BI__builtin_neon_vcvth_f16_u16: |
9866 | case NEON::BI__builtin_neon_vcvth_f16_u32: |
9867 | case NEON::BI__builtin_neon_vcvth_f16_u64: |
9868 | usgn = true; |
9869 | LLVM_FALLTHROUGH; |
9870 | case NEON::BI__builtin_neon_vcvth_f16_s16: |
9871 | case NEON::BI__builtin_neon_vcvth_f16_s32: |
9872 | case NEON::BI__builtin_neon_vcvth_f16_s64: { |
9873 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9874 | llvm::Type *FTy = HalfTy; |
9875 | llvm::Type *InTy; |
9876 | if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) |
9877 | InTy = Int64Ty; |
9878 | else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) |
9879 | InTy = Int32Ty; |
9880 | else |
9881 | InTy = Int16Ty; |
9882 | Ops[0] = Builder.CreateBitCast(Ops[0], InTy); |
9883 | if (usgn) |
9884 | return Builder.CreateUIToFP(Ops[0], FTy); |
9885 | return Builder.CreateSIToFP(Ops[0], FTy); |
9886 | } |
9887 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
9888 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
9889 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
9890 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
9891 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
9892 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
9893 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
9894 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
9895 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
9896 | case NEON::BI__builtin_neon_vcvth_s16_f16: { |
9897 | unsigned Int; |
9898 | llvm::Type* InTy = Int32Ty; |
9899 | llvm::Type* FTy = HalfTy; |
9900 | llvm::Type *Tys[2] = {InTy, FTy}; |
9901 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
9902 | switch (BuiltinID) { |
9903 | default: llvm_unreachable("missing builtin ID in switch!"); |
9904 | case NEON::BI__builtin_neon_vcvtah_u16_f16: |
9905 | Int = Intrinsic::aarch64_neon_fcvtau; break; |
9906 | case NEON::BI__builtin_neon_vcvtmh_u16_f16: |
9907 | Int = Intrinsic::aarch64_neon_fcvtmu; break; |
9908 | case NEON::BI__builtin_neon_vcvtnh_u16_f16: |
9909 | Int = Intrinsic::aarch64_neon_fcvtnu; break; |
9910 | case NEON::BI__builtin_neon_vcvtph_u16_f16: |
9911 | Int = Intrinsic::aarch64_neon_fcvtpu; break; |
9912 | case NEON::BI__builtin_neon_vcvth_u16_f16: |
9913 | Int = Intrinsic::aarch64_neon_fcvtzu; break; |
9914 | case NEON::BI__builtin_neon_vcvtah_s16_f16: |
9915 | Int = Intrinsic::aarch64_neon_fcvtas; break; |
9916 | case NEON::BI__builtin_neon_vcvtmh_s16_f16: |
9917 | Int = Intrinsic::aarch64_neon_fcvtms; break; |
9918 | case NEON::BI__builtin_neon_vcvtnh_s16_f16: |
9919 | Int = Intrinsic::aarch64_neon_fcvtns; break; |
9920 | case NEON::BI__builtin_neon_vcvtph_s16_f16: |
9921 | Int = Intrinsic::aarch64_neon_fcvtps; break; |
9922 | case NEON::BI__builtin_neon_vcvth_s16_f16: |
9923 | Int = Intrinsic::aarch64_neon_fcvtzs; break; |
9924 | } |
9925 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); |
9926 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9927 | } |
9928 | case NEON::BI__builtin_neon_vcaleh_f16: |
9929 | case NEON::BI__builtin_neon_vcalth_f16: |
9930 | case NEON::BI__builtin_neon_vcageh_f16: |
9931 | case NEON::BI__builtin_neon_vcagth_f16: { |
9932 | unsigned Int; |
9933 | llvm::Type* InTy = Int32Ty; |
9934 | llvm::Type* FTy = HalfTy; |
9935 | llvm::Type *Tys[2] = {InTy, FTy}; |
9936 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9937 | switch (BuiltinID) { |
9938 | default: llvm_unreachable("missing builtin ID in switch!"); |
9939 | case NEON::BI__builtin_neon_vcageh_f16: |
9940 | Int = Intrinsic::aarch64_neon_facge; break; |
9941 | case NEON::BI__builtin_neon_vcagth_f16: |
9942 | Int = Intrinsic::aarch64_neon_facgt; break; |
9943 | case NEON::BI__builtin_neon_vcaleh_f16: |
9944 | Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; |
9945 | case NEON::BI__builtin_neon_vcalth_f16: |
9946 | Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; |
9947 | } |
9948 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); |
9949 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9950 | } |
9951 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
9952 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: { |
9953 | unsigned Int; |
9954 | llvm::Type* InTy = Int32Ty; |
9955 | llvm::Type* FTy = HalfTy; |
9956 | llvm::Type *Tys[2] = {InTy, FTy}; |
9957 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9958 | switch (BuiltinID) { |
9959 | default: llvm_unreachable("missing builtin ID in switch!"); |
9960 | case NEON::BI__builtin_neon_vcvth_n_s16_f16: |
9961 | Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; |
9962 | case NEON::BI__builtin_neon_vcvth_n_u16_f16: |
9963 | Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; |
9964 | } |
9965 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
9966 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
9967 | } |
9968 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
9969 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: { |
9970 | unsigned Int; |
9971 | llvm::Type* FTy = HalfTy; |
9972 | llvm::Type* InTy = Int32Ty; |
9973 | llvm::Type *Tys[2] = {FTy, InTy}; |
9974 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
9975 | switch (BuiltinID) { |
9976 | default: llvm_unreachable("missing builtin ID in switch!"); |
9977 | case NEON::BI__builtin_neon_vcvth_n_f16_s16: |
9978 | Int = Intrinsic::aarch64_neon_vcvtfxs2fp; |
9979 | Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); |
9980 | break; |
9981 | case NEON::BI__builtin_neon_vcvth_n_f16_u16: |
9982 | Int = Intrinsic::aarch64_neon_vcvtfxu2fp; |
9983 | Ops[0] = Builder.CreateZExt(Ops[0], InTy); |
9984 | break; |
9985 | } |
9986 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); |
9987 | } |
9988 | case NEON::BI__builtin_neon_vpaddd_s64: { |
9989 | auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2); |
9990 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
9991 | |
9992 | Vec = Builder.CreateBitCast(Vec, Ty, "v2i64"); |
9993 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
9994 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
9995 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
9996 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
9997 | |
9998 | return Builder.CreateAdd(Op0, Op1, "vpaddd"); |
9999 | } |
10000 | case NEON::BI__builtin_neon_vpaddd_f64: { |
10001 | auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2); |
10002 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
10003 | |
10004 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f64"); |
10005 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10006 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10007 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10008 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10009 | |
10010 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
10011 | } |
10012 | case NEON::BI__builtin_neon_vpadds_f32: { |
10013 | auto *Ty = llvm::FixedVectorType::get(FloatTy, 2); |
10014 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
10015 | |
10016 | Vec = Builder.CreateBitCast(Vec, Ty, "v2f32"); |
10017 | llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0); |
10018 | llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1); |
10019 | Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0"); |
10020 | Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1"); |
10021 | |
10022 | return Builder.CreateFAdd(Op0, Op1, "vpaddd"); |
10023 | } |
10024 | case NEON::BI__builtin_neon_vceqzd_s64: |
10025 | case NEON::BI__builtin_neon_vceqzd_f64: |
10026 | case NEON::BI__builtin_neon_vceqzs_f32: |
10027 | case NEON::BI__builtin_neon_vceqzh_f16: |
10028 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10029 | return EmitAArch64CompareBuiltinExpr( |
10030 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10031 | ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); |
10032 | case NEON::BI__builtin_neon_vcgezd_s64: |
10033 | case NEON::BI__builtin_neon_vcgezd_f64: |
10034 | case NEON::BI__builtin_neon_vcgezs_f32: |
10035 | case NEON::BI__builtin_neon_vcgezh_f16: |
10036 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10037 | return EmitAArch64CompareBuiltinExpr( |
10038 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10039 | ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); |
10040 | case NEON::BI__builtin_neon_vclezd_s64: |
10041 | case NEON::BI__builtin_neon_vclezd_f64: |
10042 | case NEON::BI__builtin_neon_vclezs_f32: |
10043 | case NEON::BI__builtin_neon_vclezh_f16: |
10044 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10045 | return EmitAArch64CompareBuiltinExpr( |
10046 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10047 | ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); |
10048 | case NEON::BI__builtin_neon_vcgtzd_s64: |
10049 | case NEON::BI__builtin_neon_vcgtzd_f64: |
10050 | case NEON::BI__builtin_neon_vcgtzs_f32: |
10051 | case NEON::BI__builtin_neon_vcgtzh_f16: |
10052 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10053 | return EmitAArch64CompareBuiltinExpr( |
10054 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10055 | ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); |
10056 | case NEON::BI__builtin_neon_vcltzd_s64: |
10057 | case NEON::BI__builtin_neon_vcltzd_f64: |
10058 | case NEON::BI__builtin_neon_vcltzs_f32: |
10059 | case NEON::BI__builtin_neon_vcltzh_f16: |
10060 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10061 | return EmitAArch64CompareBuiltinExpr( |
10062 | Ops[0], ConvertType(E->getCallReturnType(getContext())), |
10063 | ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); |
10064 | |
10065 | case NEON::BI__builtin_neon_vceqzd_u64: { |
10066 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10067 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10068 | Ops[0] = |
10069 | Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty)); |
10070 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd"); |
10071 | } |
10072 | case NEON::BI__builtin_neon_vceqd_f64: |
10073 | case NEON::BI__builtin_neon_vcled_f64: |
10074 | case NEON::BI__builtin_neon_vcltd_f64: |
10075 | case NEON::BI__builtin_neon_vcged_f64: |
10076 | case NEON::BI__builtin_neon_vcgtd_f64: { |
10077 | llvm::CmpInst::Predicate P; |
10078 | switch (BuiltinID) { |
10079 | default: llvm_unreachable("missing builtin ID in switch!"); |
10080 | case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; |
10081 | case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; |
10082 | case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; |
10083 | case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; |
10084 | case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; |
10085 | } |
10086 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10087 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10088 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10089 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10090 | return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd"); |
10091 | } |
10092 | case NEON::BI__builtin_neon_vceqs_f32: |
10093 | case NEON::BI__builtin_neon_vcles_f32: |
10094 | case NEON::BI__builtin_neon_vclts_f32: |
10095 | case NEON::BI__builtin_neon_vcges_f32: |
10096 | case NEON::BI__builtin_neon_vcgts_f32: { |
10097 | llvm::CmpInst::Predicate P; |
10098 | switch (BuiltinID) { |
10099 | default: llvm_unreachable("missing builtin ID in switch!"); |
10100 | case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; |
10101 | case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; |
10102 | case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; |
10103 | case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; |
10104 | case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; |
10105 | } |
10106 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10107 | Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); |
10108 | Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy); |
10109 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10110 | return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd"); |
10111 | } |
10112 | case NEON::BI__builtin_neon_vceqh_f16: |
10113 | case NEON::BI__builtin_neon_vcleh_f16: |
10114 | case NEON::BI__builtin_neon_vclth_f16: |
10115 | case NEON::BI__builtin_neon_vcgeh_f16: |
10116 | case NEON::BI__builtin_neon_vcgth_f16: { |
10117 | llvm::CmpInst::Predicate P; |
10118 | switch (BuiltinID) { |
10119 | default: llvm_unreachable("missing builtin ID in switch!"); |
10120 | case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; |
10121 | case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; |
10122 | case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; |
10123 | case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; |
10124 | case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; |
10125 | } |
10126 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10127 | Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); |
10128 | Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy); |
10129 | Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]); |
10130 | return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd"); |
10131 | } |
10132 | case NEON::BI__builtin_neon_vceqd_s64: |
10133 | case NEON::BI__builtin_neon_vceqd_u64: |
10134 | case NEON::BI__builtin_neon_vcgtd_s64: |
10135 | case NEON::BI__builtin_neon_vcgtd_u64: |
10136 | case NEON::BI__builtin_neon_vcltd_s64: |
10137 | case NEON::BI__builtin_neon_vcltd_u64: |
10138 | case NEON::BI__builtin_neon_vcged_u64: |
10139 | case NEON::BI__builtin_neon_vcged_s64: |
10140 | case NEON::BI__builtin_neon_vcled_u64: |
10141 | case NEON::BI__builtin_neon_vcled_s64: { |
10142 | llvm::CmpInst::Predicate P; |
10143 | switch (BuiltinID) { |
10144 | default: llvm_unreachable("missing builtin ID in switch!"); |
10145 | case NEON::BI__builtin_neon_vceqd_s64: |
10146 | case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; |
10147 | case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; |
10148 | case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; |
10149 | case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; |
10150 | case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; |
10151 | case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; |
10152 | case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; |
10153 | case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; |
10154 | case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; |
10155 | } |
10156 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10157 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10158 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10159 | Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]); |
10160 | return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd"); |
10161 | } |
10162 | case NEON::BI__builtin_neon_vtstd_s64: |
10163 | case NEON::BI__builtin_neon_vtstd_u64: { |
10164 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10165 | Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); |
10166 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10167 | Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); |
10168 | Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], |
10169 | llvm::Constant::getNullValue(Int64Ty)); |
10170 | return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd"); |
10171 | } |
10172 | case NEON::BI__builtin_neon_vset_lane_i8: |
10173 | case NEON::BI__builtin_neon_vset_lane_i16: |
10174 | case NEON::BI__builtin_neon_vset_lane_i32: |
10175 | case NEON::BI__builtin_neon_vset_lane_i64: |
10176 | case NEON::BI__builtin_neon_vset_lane_bf16: |
10177 | case NEON::BI__builtin_neon_vset_lane_f32: |
10178 | case NEON::BI__builtin_neon_vsetq_lane_i8: |
10179 | case NEON::BI__builtin_neon_vsetq_lane_i16: |
10180 | case NEON::BI__builtin_neon_vsetq_lane_i32: |
10181 | case NEON::BI__builtin_neon_vsetq_lane_i64: |
10182 | case NEON::BI__builtin_neon_vsetq_lane_bf16: |
10183 | case NEON::BI__builtin_neon_vsetq_lane_f32: |
10184 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10185 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10186 | case NEON::BI__builtin_neon_vset_lane_f64: |
10187 | |
10188 | Ops[1] = |
10189 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1)); |
10190 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10191 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10192 | case NEON::BI__builtin_neon_vsetq_lane_f64: |
10193 | |
10194 | Ops[1] = |
10195 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2)); |
10196 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10197 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); |
10198 | |
10199 | case NEON::BI__builtin_neon_vget_lane_i8: |
10200 | case NEON::BI__builtin_neon_vdupb_lane_i8: |
10201 | Ops[0] = |
10202 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8)); |
10203 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10204 | "vget_lane"); |
10205 | case NEON::BI__builtin_neon_vgetq_lane_i8: |
10206 | case NEON::BI__builtin_neon_vdupb_laneq_i8: |
10207 | Ops[0] = |
10208 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16)); |
10209 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10210 | "vgetq_lane"); |
10211 | case NEON::BI__builtin_neon_vget_lane_i16: |
10212 | case NEON::BI__builtin_neon_vduph_lane_i16: |
10213 | Ops[0] = |
10214 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4)); |
10215 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10216 | "vget_lane"); |
10217 | case NEON::BI__builtin_neon_vgetq_lane_i16: |
10218 | case NEON::BI__builtin_neon_vduph_laneq_i16: |
10219 | Ops[0] = |
10220 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8)); |
10221 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10222 | "vgetq_lane"); |
10223 | case NEON::BI__builtin_neon_vget_lane_i32: |
10224 | case NEON::BI__builtin_neon_vdups_lane_i32: |
10225 | Ops[0] = |
10226 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2)); |
10227 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10228 | "vget_lane"); |
10229 | case NEON::BI__builtin_neon_vdups_lane_f32: |
10230 | Ops[0] = |
10231 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10232 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10233 | "vdups_lane"); |
10234 | case NEON::BI__builtin_neon_vgetq_lane_i32: |
10235 | case NEON::BI__builtin_neon_vdups_laneq_i32: |
10236 | Ops[0] = |
10237 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
10238 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10239 | "vgetq_lane"); |
10240 | case NEON::BI__builtin_neon_vget_lane_i64: |
10241 | case NEON::BI__builtin_neon_vdupd_lane_i64: |
10242 | Ops[0] = |
10243 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1)); |
10244 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10245 | "vget_lane"); |
10246 | case NEON::BI__builtin_neon_vdupd_lane_f64: |
10247 | Ops[0] = |
10248 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10249 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10250 | "vdupd_lane"); |
10251 | case NEON::BI__builtin_neon_vgetq_lane_i64: |
10252 | case NEON::BI__builtin_neon_vdupd_laneq_i64: |
10253 | Ops[0] = |
10254 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
10255 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10256 | "vgetq_lane"); |
10257 | case NEON::BI__builtin_neon_vget_lane_f32: |
10258 | Ops[0] = |
10259 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2)); |
10260 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10261 | "vget_lane"); |
10262 | case NEON::BI__builtin_neon_vget_lane_f64: |
10263 | Ops[0] = |
10264 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1)); |
10265 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10266 | "vget_lane"); |
10267 | case NEON::BI__builtin_neon_vgetq_lane_f32: |
10268 | case NEON::BI__builtin_neon_vdups_laneq_f32: |
10269 | Ops[0] = |
10270 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4)); |
10271 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10272 | "vgetq_lane"); |
10273 | case NEON::BI__builtin_neon_vgetq_lane_f64: |
10274 | case NEON::BI__builtin_neon_vdupd_laneq_f64: |
10275 | Ops[0] = |
10276 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2)); |
10277 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10278 | "vgetq_lane"); |
10279 | case NEON::BI__builtin_neon_vaddh_f16: |
10280 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10281 | return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh"); |
10282 | case NEON::BI__builtin_neon_vsubh_f16: |
10283 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10284 | return Builder.CreateFSub(Ops[0], Ops[1], "vsubh"); |
10285 | case NEON::BI__builtin_neon_vmulh_f16: |
10286 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10287 | return Builder.CreateFMul(Ops[0], Ops[1], "vmulh"); |
10288 | case NEON::BI__builtin_neon_vdivh_f16: |
10289 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10290 | return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh"); |
10291 | case NEON::BI__builtin_neon_vfmah_f16: |
10292 | |
10293 | return emitCallMaybeConstrainedFPBuiltin( |
10294 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10295 | {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10296 | case NEON::BI__builtin_neon_vfmsh_f16: { |
10297 | |
10298 | Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); |
10299 | Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); |
10300 | |
10301 | |
10302 | return emitCallMaybeConstrainedFPBuiltin( |
10303 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy, |
10304 | {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]}); |
10305 | } |
10306 | case NEON::BI__builtin_neon_vaddd_s64: |
10307 | case NEON::BI__builtin_neon_vaddd_u64: |
10308 | return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); |
10309 | case NEON::BI__builtin_neon_vsubd_s64: |
10310 | case NEON::BI__builtin_neon_vsubd_u64: |
10311 | return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); |
10312 | case NEON::BI__builtin_neon_vqdmlalh_s16: |
10313 | case NEON::BI__builtin_neon_vqdmlslh_s16: { |
10314 | SmallVector<Value *, 2> ProductOps; |
10315 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10316 | ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); |
10317 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10318 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10319 | ProductOps, "vqdmlXl"); |
10320 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10321 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10322 | |
10323 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 |
10324 | ? Intrinsic::aarch64_neon_sqadd |
10325 | : Intrinsic::aarch64_neon_sqsub; |
10326 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); |
10327 | } |
10328 | case NEON::BI__builtin_neon_vqshlud_n_s64: { |
10329 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10330 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10331 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), |
10332 | Ops, "vqshlu_n"); |
10333 | } |
10334 | case NEON::BI__builtin_neon_vqshld_n_u64: |
10335 | case NEON::BI__builtin_neon_vqshld_n_s64: { |
10336 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 |
10337 | ? Intrinsic::aarch64_neon_uqshl |
10338 | : Intrinsic::aarch64_neon_sqshl; |
10339 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10340 | Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); |
10341 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); |
10342 | } |
10343 | case NEON::BI__builtin_neon_vrshrd_n_u64: |
10344 | case NEON::BI__builtin_neon_vrshrd_n_s64: { |
10345 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 |
10346 | ? Intrinsic::aarch64_neon_urshl |
10347 | : Intrinsic::aarch64_neon_srshl; |
10348 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10349 | int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); |
10350 | Ops[1] = ConstantInt::get(Int64Ty, -SV); |
10351 | return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n"); |
10352 | } |
10353 | case NEON::BI__builtin_neon_vrsrad_n_u64: |
10354 | case NEON::BI__builtin_neon_vrsrad_n_s64: { |
10355 | unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 |
10356 | ? Intrinsic::aarch64_neon_urshl |
10357 | : Intrinsic::aarch64_neon_srshl; |
10358 | Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); |
10359 | Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); |
10360 | Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), |
10361 | {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); |
10362 | return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); |
10363 | } |
10364 | case NEON::BI__builtin_neon_vshld_n_s64: |
10365 | case NEON::BI__builtin_neon_vshld_n_u64: { |
10366 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10367 | return Builder.CreateShl( |
10368 | Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); |
10369 | } |
10370 | case NEON::BI__builtin_neon_vshrd_n_s64: { |
10371 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10372 | return Builder.CreateAShr( |
10373 | Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10374 | Amt->getZExtValue())), |
10375 | "shrd_n"); |
10376 | } |
10377 | case NEON::BI__builtin_neon_vshrd_n_u64: { |
10378 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
10379 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10380 | |
10381 | if (ShiftAmt == 64) |
10382 | return ConstantInt::get(Int64Ty, 0); |
10383 | return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt), |
10384 | "shrd_n"); |
10385 | } |
10386 | case NEON::BI__builtin_neon_vsrad_n_s64: { |
10387 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10388 | Ops[1] = Builder.CreateAShr( |
10389 | Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63), |
10390 | Amt->getZExtValue())), |
10391 | "shrd_n"); |
10392 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10393 | } |
10394 | case NEON::BI__builtin_neon_vsrad_n_u64: { |
10395 | llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); |
10396 | uint64_t ShiftAmt = Amt->getZExtValue(); |
10397 | |
10398 | |
10399 | if (ShiftAmt == 64) |
10400 | return Ops[0]; |
10401 | Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt), |
10402 | "shrd_n"); |
10403 | return Builder.CreateAdd(Ops[0], Ops[1]); |
10404 | } |
10405 | case NEON::BI__builtin_neon_vqdmlalh_lane_s16: |
10406 | case NEON::BI__builtin_neon_vqdmlalh_laneq_s16: |
10407 | case NEON::BI__builtin_neon_vqdmlslh_lane_s16: |
10408 | case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: { |
10409 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10410 | "lane"); |
10411 | SmallVector<Value *, 2> ProductOps; |
10412 | ProductOps.push_back(vectorWrapScalar16(Ops[1])); |
10413 | ProductOps.push_back(vectorWrapScalar16(Ops[2])); |
10414 | auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); |
10415 | Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), |
10416 | ProductOps, "vqdmlXl"); |
10417 | Constant *CI = ConstantInt::get(SizeTy, 0); |
10418 | Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); |
10419 | Ops.pop_back(); |
10420 | |
10421 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 || |
10422 | BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16) |
10423 | ? Intrinsic::aarch64_neon_sqadd |
10424 | : Intrinsic::aarch64_neon_sqsub; |
10425 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl"); |
10426 | } |
10427 | case NEON::BI__builtin_neon_vqdmlals_s32: |
10428 | case NEON::BI__builtin_neon_vqdmlsls_s32: { |
10429 | SmallVector<Value *, 2> ProductOps; |
10430 | ProductOps.push_back(Ops[1]); |
10431 | ProductOps.push_back(EmitScalarExpr(E->getArg(2))); |
10432 | Ops[1] = |
10433 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10434 | ProductOps, "vqdmlXl"); |
10435 | |
10436 | unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 |
10437 | ? Intrinsic::aarch64_neon_sqadd |
10438 | : Intrinsic::aarch64_neon_sqsub; |
10439 | return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); |
10440 | } |
10441 | case NEON::BI__builtin_neon_vqdmlals_lane_s32: |
10442 | case NEON::BI__builtin_neon_vqdmlals_laneq_s32: |
10443 | case NEON::BI__builtin_neon_vqdmlsls_lane_s32: |
10444 | case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: { |
10445 | Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), |
10446 | "lane"); |
10447 | SmallVector<Value *, 2> ProductOps; |
10448 | ProductOps.push_back(Ops[1]); |
10449 | ProductOps.push_back(Ops[2]); |
10450 | Ops[1] = |
10451 | EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar), |
10452 | ProductOps, "vqdmlXl"); |
10453 | Ops.pop_back(); |
10454 | |
10455 | unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 || |
10456 | BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32) |
10457 | ? Intrinsic::aarch64_neon_sqadd |
10458 | : Intrinsic::aarch64_neon_sqsub; |
10459 | return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl"); |
10460 | } |
10461 | case NEON::BI__builtin_neon_vget_lane_bf16: |
10462 | case NEON::BI__builtin_neon_vduph_lane_bf16: |
10463 | case NEON::BI__builtin_neon_vduph_lane_f16: { |
10464 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10465 | "vget_lane"); |
10466 | } |
10467 | case NEON::BI__builtin_neon_vgetq_lane_bf16: |
10468 | case NEON::BI__builtin_neon_vduph_laneq_bf16: |
10469 | case NEON::BI__builtin_neon_vduph_laneq_f16: { |
10470 | return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), |
10471 | "vgetq_lane"); |
10472 | } |
10473 | |
10474 | case AArch64::BI_InterlockedAdd: { |
10475 | Value *Arg0 = EmitScalarExpr(E->getArg(0)); |
10476 | Value *Arg1 = EmitScalarExpr(E->getArg(1)); |
10477 | AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( |
10478 | AtomicRMWInst::Add, Arg0, Arg1, |
10479 | llvm::AtomicOrdering::SequentiallyConsistent); |
10480 | return Builder.CreateAdd(RMWI, Arg1); |
10481 | } |
10482 | } |
10483 | |
10484 | llvm::FixedVectorType *VTy = GetNeonType(this, Type); |
10485 | llvm::Type *Ty = VTy; |
10486 | if (!Ty) |
10487 | return nullptr; |
10488 | |
10489 | |
10490 | |
10491 | Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, |
10492 | AArch64SIMDIntrinsicsProvenSorted); |
10493 | |
10494 | if (Builtin) |
10495 | return EmitCommonNeonBuiltinExpr( |
10496 | Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, |
10497 | Builtin->NameHint, Builtin->TypeModifier, E, Ops, |
10498 | Address::invalid(), Address::invalid(), Arch); |
10499 | |
10500 | if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) |
10501 | return V; |
10502 | |
10503 | unsigned Int; |
10504 | switch (BuiltinID) { |
10505 | default: return nullptr; |
10506 | case NEON::BI__builtin_neon_vbsl_v: |
10507 | case NEON::BI__builtin_neon_vbslq_v: { |
10508 | llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); |
10509 | Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl"); |
10510 | Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl"); |
10511 | Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl"); |
10512 | |
10513 | Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl"); |
10514 | Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl"); |
10515 | Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl"); |
10516 | return Builder.CreateBitCast(Ops[0], Ty); |
10517 | } |
10518 | case NEON::BI__builtin_neon_vfma_lane_v: |
10519 | case NEON::BI__builtin_neon_vfmaq_lane_v: { |
10520 | |
10521 | |
10522 | Value *Addend = Ops[0]; |
10523 | Value *Multiplicand = Ops[1]; |
10524 | Value *LaneSource = Ops[2]; |
10525 | Ops[0] = Multiplicand; |
10526 | Ops[1] = LaneSource; |
10527 | Ops[2] = Addend; |
10528 | |
10529 | |
10530 | auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v |
10531 | ? llvm::FixedVectorType::get(VTy->getElementType(), |
10532 | VTy->getNumElements() / 2) |
10533 | : VTy; |
10534 | llvm::Constant *cst = cast<Constant>(Ops[3]); |
10535 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst); |
10536 | Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy); |
10537 | Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane"); |
10538 | |
10539 | Ops.pop_back(); |
10540 | Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma |
10541 | : Intrinsic::fma; |
10542 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla"); |
10543 | } |
10544 | case NEON::BI__builtin_neon_vfma_laneq_v: { |
10545 | auto *VTy = cast<llvm::FixedVectorType>(Ty); |
10546 | |
10547 | if (VTy && VTy->getElementType() == DoubleTy) { |
10548 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10549 | Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy); |
10550 | llvm::FixedVectorType *VTy = |
10551 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true)); |
10552 | Ops[2] = Builder.CreateBitCast(Ops[2], VTy); |
10553 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10554 | Value *Result; |
10555 | Result = emitCallMaybeConstrainedFPBuiltin( |
10556 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, |
10557 | DoubleTy, {Ops[1], Ops[2], Ops[0]}); |
10558 | return Builder.CreateBitCast(Result, Ty); |
10559 | } |
10560 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10561 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10562 | |
10563 | auto *STy = llvm::FixedVectorType::get(VTy->getElementType(), |
10564 | VTy->getNumElements() * 2); |
10565 | Ops[2] = Builder.CreateBitCast(Ops[2], STy); |
10566 | Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), |
10567 | cast<ConstantInt>(Ops[3])); |
10568 | Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); |
10569 | |
10570 | return emitCallMaybeConstrainedFPBuiltin( |
10571 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10572 | {Ops[2], Ops[1], Ops[0]}); |
10573 | } |
10574 | case NEON::BI__builtin_neon_vfmaq_laneq_v: { |
10575 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10576 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
10577 | |
10578 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
10579 | Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); |
10580 | return emitCallMaybeConstrainedFPBuiltin( |
10581 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10582 | {Ops[2], Ops[1], Ops[0]}); |
10583 | } |
10584 | case NEON::BI__builtin_neon_vfmah_lane_f16: |
10585 | case NEON::BI__builtin_neon_vfmas_lane_f32: |
10586 | case NEON::BI__builtin_neon_vfmah_laneq_f16: |
10587 | case NEON::BI__builtin_neon_vfmas_laneq_f32: |
10588 | case NEON::BI__builtin_neon_vfmad_lane_f64: |
10589 | case NEON::BI__builtin_neon_vfmad_laneq_f64: { |
10590 | Ops.push_back(EmitScalarExpr(E->getArg(3))); |
10591 | llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); |
10592 | Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); |
10593 | return emitCallMaybeConstrainedFPBuiltin( |
10594 | *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty, |
10595 | {Ops[1], Ops[2], Ops[0]}); |
10596 | } |
10597 | case NEON::BI__builtin_neon_vmull_v: |
10598 | |
10599 | Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; |
10600 | if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; |
10601 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); |
10602 | case NEON::BI__builtin_neon_vmax_v: |
10603 | case NEON::BI__builtin_neon_vmaxq_v: |
10604 | |
10605 | Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; |
10606 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; |
10607 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); |
10608 | case NEON::BI__builtin_neon_vmaxh_f16: { |
10609 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10610 | Int = Intrinsic::aarch64_neon_fmax; |
10611 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax"); |
10612 | } |
10613 | case NEON::BI__builtin_neon_vmin_v: |
10614 | case NEON::BI__builtin_neon_vminq_v: |
10615 | |
10616 | Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; |
10617 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; |
10618 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); |
10619 | case NEON::BI__builtin_neon_vminh_f16: { |
10620 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10621 | Int = Intrinsic::aarch64_neon_fmin; |
10622 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin"); |
10623 | } |
10624 | case NEON::BI__builtin_neon_vabd_v: |
10625 | case NEON::BI__builtin_neon_vabdq_v: |
10626 | |
10627 | Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; |
10628 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; |
10629 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); |
10630 | case NEON::BI__builtin_neon_vpadal_v: |
10631 | case NEON::BI__builtin_neon_vpadalq_v: { |
10632 | unsigned ArgElts = VTy->getNumElements(); |
10633 | llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); |
10634 | unsigned BitWidth = EltTy->getBitWidth(); |
10635 | auto *ArgTy = llvm::FixedVectorType::get( |
10636 | llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts); |
10637 | llvm::Type* Tys[2] = { VTy, ArgTy }; |
10638 | Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; |
10639 | SmallVector<llvm::Value*, 1> TmpOps; |
10640 | TmpOps.push_back(Ops[1]); |
10641 | Function *F = CGM.getIntrinsic(Int, Tys); |
10642 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); |
10643 | llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); |
10644 | return Builder.CreateAdd(tmp, addend); |
10645 | } |
10646 | case NEON::BI__builtin_neon_vpmin_v: |
10647 | case NEON::BI__builtin_neon_vpminq_v: |
10648 | |
10649 | Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; |
10650 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; |
10651 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); |
10652 | case NEON::BI__builtin_neon_vpmax_v: |
10653 | case NEON::BI__builtin_neon_vpmaxq_v: |
10654 | |
10655 | Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; |
10656 | if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; |
10657 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); |
10658 | case NEON::BI__builtin_neon_vminnm_v: |
10659 | case NEON::BI__builtin_neon_vminnmq_v: |
10660 | Int = Intrinsic::aarch64_neon_fminnm; |
10661 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm"); |
10662 | case NEON::BI__builtin_neon_vminnmh_f16: |
10663 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10664 | Int = Intrinsic::aarch64_neon_fminnm; |
10665 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm"); |
10666 | case NEON::BI__builtin_neon_vmaxnm_v: |
10667 | case NEON::BI__builtin_neon_vmaxnmq_v: |
10668 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10669 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm"); |
10670 | case NEON::BI__builtin_neon_vmaxnmh_f16: |
10671 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10672 | Int = Intrinsic::aarch64_neon_fmaxnm; |
10673 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); |
10674 | case NEON::BI__builtin_neon_vrecpss_f32: { |
10675 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10676 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), |
10677 | Ops, "vrecps"); |
10678 | } |
10679 | case NEON::BI__builtin_neon_vrecpsd_f64: |
10680 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10681 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), |
10682 | Ops, "vrecps"); |
10683 | case NEON::BI__builtin_neon_vrecpsh_f16: |
10684 | Ops.push_back(EmitScalarExpr(E->getArg(1))); |
10685 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), |
10686 | Ops, "vrecps"); |
10687 | case NEON::BI__builtin_neon_vqshrun_n_v: |
10688 | Int = Intrinsic::aarch64_neon_sqshrun; |
10689 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); |
10690 | case NEON::BI__builtin_neon_vqrshrun_n_v: |
10691 | Int = Intrinsic::aarch64_neon_sqrshrun; |
10692 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); |
10693 | case NEON::BI__builtin_neon_vqshrn_n_v: |
10694 | Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; |
10695 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); |
10696 | case NEON::BI__builtin_neon_vrshrn_n_v: |
10697 | Int = Intrinsic::aarch64_neon_rshrn; |
10698 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); |
10699 | case NEON::BI__builtin_neon_vqrshrn_n_v: |
10700 | Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; |
10701 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); |
10702 | case NEON::BI__builtin_neon_vrndah_f16: { |
10703 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10704 | Int = Builder.getIsFPConstrained() |
10705 | ? Intrinsic::experimental_constrained_round |
10706 | : Intrinsic::round; |
10707 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda"); |
10708 | } |
10709 | case NEON::BI__builtin_neon_vrnda_v: |
10710 | case NEON::BI__builtin_neon_vrndaq_v: { |
10711 | Int = Builder.getIsFPConstrained() |
10712 | ? Intrinsic::experimental_constrained_round |
10713 | : Intrinsic::round; |
10714 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda"); |
10715 | } |
10716 | case NEON::BI__builtin_neon_vrndih_f16: { |
10717 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10718 | Int = Builder.getIsFPConstrained() |
10719 | ? Intrinsic::experimental_constrained_nearbyint |
10720 | : Intrinsic::nearbyint; |
10721 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi"); |
10722 | } |
10723 | case NEON::BI__builtin_neon_vrndmh_f16: { |
10724 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10725 | Int = Builder.getIsFPConstrained() |
10726 | ? Intrinsic::experimental_constrained_floor |
10727 | : Intrinsic::floor; |
10728 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm"); |
10729 | } |
10730 | case NEON::BI__builtin_neon_vrndm_v: |
10731 | case NEON::BI__builtin_neon_vrndmq_v: { |
10732 | Int = Builder.getIsFPConstrained() |
10733 | ? Intrinsic::experimental_constrained_floor |
10734 | : Intrinsic::floor; |
10735 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm"); |
10736 | } |
10737 | case NEON::BI__builtin_neon_vrndnh_f16: { |
10738 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10739 | Int = Builder.getIsFPConstrained() |
10740 | ? Intrinsic::experimental_constrained_roundeven |
10741 | : Intrinsic::roundeven; |
10742 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn"); |
10743 | } |
10744 | case NEON::BI__builtin_neon_vrndn_v: |
10745 | case NEON::BI__builtin_neon_vrndnq_v: { |
10746 | Int = Builder.getIsFPConstrained() |
10747 | ? Intrinsic::experimental_constrained_roundeven |
10748 | : Intrinsic::roundeven; |
10749 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn"); |
10750 | } |
10751 | case NEON::BI__builtin_neon_vrndns_f32: { |
10752 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10753 | Int = Builder.getIsFPConstrained() |
10754 | ? Intrinsic::experimental_constrained_roundeven |
10755 | : Intrinsic::roundeven; |
10756 | return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn"); |
10757 | } |
10758 | case NEON::BI__builtin_neon_vrndph_f16: { |
10759 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10760 | Int = Builder.getIsFPConstrained() |
10761 | ? Intrinsic::experimental_constrained_ceil |
10762 | : Intrinsic::ceil; |
10763 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp"); |
10764 | } |
10765 | case NEON::BI__builtin_neon_vrndp_v: |
10766 | case NEON::BI__builtin_neon_vrndpq_v: { |
10767 | Int = Builder.getIsFPConstrained() |
10768 | ? Intrinsic::experimental_constrained_ceil |
10769 | : Intrinsic::ceil; |
10770 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp"); |
10771 | } |
10772 | case NEON::BI__builtin_neon_vrndxh_f16: { |
10773 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10774 | Int = Builder.getIsFPConstrained() |
10775 | ? Intrinsic::experimental_constrained_rint |
10776 | : Intrinsic::rint; |
10777 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx"); |
10778 | } |
10779 | case NEON::BI__builtin_neon_vrndx_v: |
10780 | case NEON::BI__builtin_neon_vrndxq_v: { |
10781 | Int = Builder.getIsFPConstrained() |
10782 | ? Intrinsic::experimental_constrained_rint |
10783 | : Intrinsic::rint; |
10784 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx"); |
10785 | } |
10786 | case NEON::BI__builtin_neon_vrndh_f16: { |
10787 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10788 | Int = Builder.getIsFPConstrained() |
10789 | ? Intrinsic::experimental_constrained_trunc |
10790 | : Intrinsic::trunc; |
10791 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz"); |
10792 | } |
10793 | case NEON::BI__builtin_neon_vrnd32x_v: |
10794 | case NEON::BI__builtin_neon_vrnd32xq_v: { |
10795 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10796 | Int = Intrinsic::aarch64_neon_frint32x; |
10797 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x"); |
10798 | } |
10799 | case NEON::BI__builtin_neon_vrnd32z_v: |
10800 | case NEON::BI__builtin_neon_vrnd32zq_v: { |
10801 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10802 | Int = Intrinsic::aarch64_neon_frint32z; |
10803 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z"); |
10804 | } |
10805 | case NEON::BI__builtin_neon_vrnd64x_v: |
10806 | case NEON::BI__builtin_neon_vrnd64xq_v: { |
10807 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10808 | Int = Intrinsic::aarch64_neon_frint64x; |
10809 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x"); |
10810 | } |
10811 | case NEON::BI__builtin_neon_vrnd64z_v: |
10812 | case NEON::BI__builtin_neon_vrnd64zq_v: { |
10813 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10814 | Int = Intrinsic::aarch64_neon_frint64z; |
10815 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z"); |
10816 | } |
10817 | case NEON::BI__builtin_neon_vrnd_v: |
10818 | case NEON::BI__builtin_neon_vrndq_v: { |
10819 | Int = Builder.getIsFPConstrained() |
10820 | ? Intrinsic::experimental_constrained_trunc |
10821 | : Intrinsic::trunc; |
10822 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz"); |
10823 | } |
10824 | case NEON::BI__builtin_neon_vcvt_f64_v: |
10825 | case NEON::BI__builtin_neon_vcvtq_f64_v: |
10826 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10827 | Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad)); |
10828 | return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") |
10829 | : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); |
10830 | case NEON::BI__builtin_neon_vcvt_f64_f32: { |
10831 | assert(Type.getEltType() == NeonTypeFlags::Float64 && quad && |
10832 | "unexpected vcvt_f64_f32 builtin"); |
10833 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false); |
10834 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
10835 | |
10836 | return Builder.CreateFPExt(Ops[0], Ty, "vcvt"); |
10837 | } |
10838 | case NEON::BI__builtin_neon_vcvt_f32_f64: { |
10839 | assert(Type.getEltType() == NeonTypeFlags::Float32 && |
10840 | "unexpected vcvt_f32_f64 builtin"); |
10841 | NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true); |
10842 | Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag)); |
10843 | |
10844 | return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt"); |
10845 | } |
10846 | case NEON::BI__builtin_neon_vcvt_s32_v: |
10847 | case NEON::BI__builtin_neon_vcvt_u32_v: |
10848 | case NEON::BI__builtin_neon_vcvt_s64_v: |
10849 | case NEON::BI__builtin_neon_vcvt_u64_v: |
10850 | case NEON::BI__builtin_neon_vcvt_s16_v: |
10851 | case NEON::BI__builtin_neon_vcvt_u16_v: |
10852 | case NEON::BI__builtin_neon_vcvtq_s32_v: |
10853 | case NEON::BI__builtin_neon_vcvtq_u32_v: |
10854 | case NEON::BI__builtin_neon_vcvtq_s64_v: |
10855 | case NEON::BI__builtin_neon_vcvtq_u64_v: |
10856 | case NEON::BI__builtin_neon_vcvtq_s16_v: |
10857 | case NEON::BI__builtin_neon_vcvtq_u16_v: { |
10858 | Int = |
10859 | usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs; |
10860 | llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; |
10861 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz"); |
10862 | } |
10863 | case NEON::BI__builtin_neon_vcvta_s16_v: |
10864 | case NEON::BI__builtin_neon_vcvta_u16_v: |
10865 | case NEON::BI__builtin_neon_vcvta_s32_v: |
10866 | case NEON::BI__builtin_neon_vcvtaq_s16_v: |
10867 | case NEON::BI__builtin_neon_vcvtaq_s32_v: |
10868 | case NEON::BI__builtin_neon_vcvta_u32_v: |
10869 | case NEON::BI__builtin_neon_vcvtaq_u16_v: |
10870 | case NEON::BI__builtin_neon_vcvtaq_u32_v: |
10871 | case NEON::BI__builtin_neon_vcvta_s64_v: |
10872 | case NEON::BI__builtin_neon_vcvtaq_s64_v: |
10873 | case NEON::BI__builtin_neon_vcvta_u64_v: |
10874 | case NEON::BI__builtin_neon_vcvtaq_u64_v: { |
10875 | Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; |
10876 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10877 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); |
10878 | } |
10879 | case NEON::BI__builtin_neon_vcvtm_s16_v: |
10880 | case NEON::BI__builtin_neon_vcvtm_s32_v: |
10881 | case NEON::BI__builtin_neon_vcvtmq_s16_v: |
10882 | case NEON::BI__builtin_neon_vcvtmq_s32_v: |
10883 | case NEON::BI__builtin_neon_vcvtm_u16_v: |
10884 | case NEON::BI__builtin_neon_vcvtm_u32_v: |
10885 | case NEON::BI__builtin_neon_vcvtmq_u16_v: |
10886 | case NEON::BI__builtin_neon_vcvtmq_u32_v: |
10887 | case NEON::BI__builtin_neon_vcvtm_s64_v: |
10888 | case NEON::BI__builtin_neon_vcvtmq_s64_v: |
10889 | case NEON::BI__builtin_neon_vcvtm_u64_v: |
10890 | case NEON::BI__builtin_neon_vcvtmq_u64_v: { |
10891 | Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; |
10892 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10893 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); |
10894 | } |
10895 | case NEON::BI__builtin_neon_vcvtn_s16_v: |
10896 | case NEON::BI__builtin_neon_vcvtn_s32_v: |
10897 | case NEON::BI__builtin_neon_vcvtnq_s16_v: |
10898 | case NEON::BI__builtin_neon_vcvtnq_s32_v: |
10899 | case NEON::BI__builtin_neon_vcvtn_u16_v: |
10900 | case NEON::BI__builtin_neon_vcvtn_u32_v: |
10901 | case NEON::BI__builtin_neon_vcvtnq_u16_v: |
10902 | case NEON::BI__builtin_neon_vcvtnq_u32_v: |
10903 | case NEON::BI__builtin_neon_vcvtn_s64_v: |
10904 | case NEON::BI__builtin_neon_vcvtnq_s64_v: |
10905 | case NEON::BI__builtin_neon_vcvtn_u64_v: |
10906 | case NEON::BI__builtin_neon_vcvtnq_u64_v: { |
10907 | Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; |
10908 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10909 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); |
10910 | } |
10911 | case NEON::BI__builtin_neon_vcvtp_s16_v: |
10912 | case NEON::BI__builtin_neon_vcvtp_s32_v: |
10913 | case NEON::BI__builtin_neon_vcvtpq_s16_v: |
10914 | case NEON::BI__builtin_neon_vcvtpq_s32_v: |
10915 | case NEON::BI__builtin_neon_vcvtp_u16_v: |
10916 | case NEON::BI__builtin_neon_vcvtp_u32_v: |
10917 | case NEON::BI__builtin_neon_vcvtpq_u16_v: |
10918 | case NEON::BI__builtin_neon_vcvtpq_u32_v: |
10919 | case NEON::BI__builtin_neon_vcvtp_s64_v: |
10920 | case NEON::BI__builtin_neon_vcvtpq_s64_v: |
10921 | case NEON::BI__builtin_neon_vcvtp_u64_v: |
10922 | case NEON::BI__builtin_neon_vcvtpq_u64_v: { |
10923 | Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; |
10924 | llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; |
10925 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); |
10926 | } |
10927 | case NEON::BI__builtin_neon_vmulx_v: |
10928 | case NEON::BI__builtin_neon_vmulxq_v: { |
10929 | Int = Intrinsic::aarch64_neon_fmulx; |
10930 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx"); |
10931 | } |
10932 | case NEON::BI__builtin_neon_vmulxh_lane_f16: |
10933 | case NEON::BI__builtin_neon_vmulxh_laneq_f16: { |
10934 | |
10935 | |
10936 | Ops.push_back(EmitScalarExpr(E->getArg(2))); |
10937 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
10938 | Ops.pop_back(); |
10939 | Int = Intrinsic::aarch64_neon_fmulx; |
10940 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx"); |
10941 | } |
10942 | case NEON::BI__builtin_neon_vmul_lane_v: |
10943 | case NEON::BI__builtin_neon_vmul_laneq_v: { |
10944 | |
10945 | bool Quad = false; |
10946 | if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v) |
10947 | Quad = true; |
10948 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
10949 | llvm::FixedVectorType *VTy = |
10950 | GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad)); |
10951 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
10952 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract"); |
10953 | Value *Result = Builder.CreateFMul(Ops[0], Ops[1]); |
10954 | return Builder.CreateBitCast(Result, Ty); |
10955 | } |
10956 | case NEON::BI__builtin_neon_vnegd_s64: |
10957 | return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); |
10958 | case NEON::BI__builtin_neon_vnegh_f16: |
10959 | return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); |
10960 | case NEON::BI__builtin_neon_vpmaxnm_v: |
10961 | case NEON::BI__builtin_neon_vpmaxnmq_v: { |
10962 | Int = Intrinsic::aarch64_neon_fmaxnmp; |
10963 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm"); |
10964 | } |
10965 | case NEON::BI__builtin_neon_vpminnm_v: |
10966 | case NEON::BI__builtin_neon_vpminnmq_v: { |
10967 | Int = Intrinsic::aarch64_neon_fminnmp; |
10968 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm"); |
10969 | } |
10970 | case NEON::BI__builtin_neon_vsqrth_f16: { |
10971 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
10972 | Int = Builder.getIsFPConstrained() |
10973 | ? Intrinsic::experimental_constrained_sqrt |
10974 | : Intrinsic::sqrt; |
10975 | return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt"); |
10976 | } |
10977 | case NEON::BI__builtin_neon_vsqrt_v: |
10978 | case NEON::BI__builtin_neon_vsqrtq_v: { |
10979 | Int = Builder.getIsFPConstrained() |
10980 | ? Intrinsic::experimental_constrained_sqrt |
10981 | : Intrinsic::sqrt; |
10982 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
10983 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt"); |
10984 | } |
10985 | case NEON::BI__builtin_neon_vrbit_v: |
10986 | case NEON::BI__builtin_neon_vrbitq_v: { |
10987 | Int = Intrinsic::bitreverse; |
10988 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit"); |
10989 | } |
10990 | case NEON::BI__builtin_neon_vaddv_u8: |
10991 | |
10992 | usgn = true; |
10993 | LLVM_FALLTHROUGH; |
10994 | case NEON::BI__builtin_neon_vaddv_s8: { |
10995 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
10996 | Ty = Int32Ty; |
10997 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
10998 | llvm::Type *Tys[2] = { Ty, VTy }; |
10999 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11000 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11001 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11002 | } |
11003 | case NEON::BI__builtin_neon_vaddv_u16: |
11004 | usgn = true; |
11005 | LLVM_FALLTHROUGH; |
11006 | case NEON::BI__builtin_neon_vaddv_s16: { |
11007 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11008 | Ty = Int32Ty; |
11009 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11010 | llvm::Type *Tys[2] = { Ty, VTy }; |
11011 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11012 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11013 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11014 | } |
11015 | case NEON::BI__builtin_neon_vaddvq_u8: |
11016 | usgn = true; |
11017 | LLVM_FALLTHROUGH; |
11018 | case NEON::BI__builtin_neon_vaddvq_s8: { |
11019 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11020 | Ty = Int32Ty; |
11021 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11022 | llvm::Type *Tys[2] = { Ty, VTy }; |
11023 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11024 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11025 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11026 | } |
11027 | case NEON::BI__builtin_neon_vaddvq_u16: |
11028 | usgn = true; |
11029 | LLVM_FALLTHROUGH; |
11030 | case NEON::BI__builtin_neon_vaddvq_s16: { |
11031 | Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; |
11032 | Ty = Int32Ty; |
11033 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11034 | llvm::Type *Tys[2] = { Ty, VTy }; |
11035 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11036 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); |
11037 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11038 | } |
11039 | case NEON::BI__builtin_neon_vmaxv_u8: { |
11040 | Int = Intrinsic::aarch64_neon_umaxv; |
11041 | Ty = Int32Ty; |
11042 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11043 | llvm::Type *Tys[2] = { Ty, VTy }; |
11044 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11045 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11046 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11047 | } |
11048 | case NEON::BI__builtin_neon_vmaxv_u16: { |
11049 | Int = Intrinsic::aarch64_neon_umaxv; |
11050 | Ty = Int32Ty; |
11051 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11052 | llvm::Type *Tys[2] = { Ty, VTy }; |
11053 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11054 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11055 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11056 | } |
11057 | case NEON::BI__builtin_neon_vmaxvq_u8: { |
11058 | Int = Intrinsic::aarch64_neon_umaxv; |
11059 | Ty = Int32Ty; |
11060 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11061 | llvm::Type *Tys[2] = { Ty, VTy }; |
11062 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11063 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11064 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11065 | } |
11066 | case NEON::BI__builtin_neon_vmaxvq_u16: { |
11067 | Int = Intrinsic::aarch64_neon_umaxv; |
11068 | Ty = Int32Ty; |
11069 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11070 | llvm::Type *Tys[2] = { Ty, VTy }; |
11071 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11072 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11073 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11074 | } |
11075 | case NEON::BI__builtin_neon_vmaxv_s8: { |
11076 | Int = Intrinsic::aarch64_neon_smaxv; |
11077 | Ty = Int32Ty; |
11078 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11079 | llvm::Type *Tys[2] = { Ty, VTy }; |
11080 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11081 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11082 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11083 | } |
11084 | case NEON::BI__builtin_neon_vmaxv_s16: { |
11085 | Int = Intrinsic::aarch64_neon_smaxv; |
11086 | Ty = Int32Ty; |
11087 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11088 | llvm::Type *Tys[2] = { Ty, VTy }; |
11089 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11090 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11091 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11092 | } |
11093 | case NEON::BI__builtin_neon_vmaxvq_s8: { |
11094 | Int = Intrinsic::aarch64_neon_smaxv; |
11095 | Ty = Int32Ty; |
11096 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11097 | llvm::Type *Tys[2] = { Ty, VTy }; |
11098 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11099 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11100 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11101 | } |
11102 | case NEON::BI__builtin_neon_vmaxvq_s16: { |
11103 | Int = Intrinsic::aarch64_neon_smaxv; |
11104 | Ty = Int32Ty; |
11105 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11106 | llvm::Type *Tys[2] = { Ty, VTy }; |
11107 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11108 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11109 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11110 | } |
11111 | case NEON::BI__builtin_neon_vmaxv_f16: { |
11112 | Int = Intrinsic::aarch64_neon_fmaxv; |
11113 | Ty = HalfTy; |
11114 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11115 | llvm::Type *Tys[2] = { Ty, VTy }; |
11116 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11117 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11118 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11119 | } |
11120 | case NEON::BI__builtin_neon_vmaxvq_f16: { |
11121 | Int = Intrinsic::aarch64_neon_fmaxv; |
11122 | Ty = HalfTy; |
11123 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11124 | llvm::Type *Tys[2] = { Ty, VTy }; |
11125 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11126 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); |
11127 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11128 | } |
11129 | case NEON::BI__builtin_neon_vminv_u8: { |
11130 | Int = Intrinsic::aarch64_neon_uminv; |
11131 | Ty = Int32Ty; |
11132 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11133 | llvm::Type *Tys[2] = { Ty, VTy }; |
11134 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11135 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11136 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11137 | } |
11138 | case NEON::BI__builtin_neon_vminv_u16: { |
11139 | Int = Intrinsic::aarch64_neon_uminv; |
11140 | Ty = Int32Ty; |
11141 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11142 | llvm::Type *Tys[2] = { Ty, VTy }; |
11143 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11144 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11145 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11146 | } |
11147 | case NEON::BI__builtin_neon_vminvq_u8: { |
11148 | Int = Intrinsic::aarch64_neon_uminv; |
11149 | Ty = Int32Ty; |
11150 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11151 | llvm::Type *Tys[2] = { Ty, VTy }; |
11152 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11153 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11154 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11155 | } |
11156 | case NEON::BI__builtin_neon_vminvq_u16: { |
11157 | Int = Intrinsic::aarch64_neon_uminv; |
11158 | Ty = Int32Ty; |
11159 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11160 | llvm::Type *Tys[2] = { Ty, VTy }; |
11161 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11162 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11163 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11164 | } |
11165 | case NEON::BI__builtin_neon_vminv_s8: { |
11166 | Int = Intrinsic::aarch64_neon_sminv; |
11167 | Ty = Int32Ty; |
11168 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11169 | llvm::Type *Tys[2] = { Ty, VTy }; |
11170 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11171 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11172 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11173 | } |
11174 | case NEON::BI__builtin_neon_vminv_s16: { |
11175 | Int = Intrinsic::aarch64_neon_sminv; |
11176 | Ty = Int32Ty; |
11177 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11178 | llvm::Type *Tys[2] = { Ty, VTy }; |
11179 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11180 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11181 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11182 | } |
11183 | case NEON::BI__builtin_neon_vminvq_s8: { |
11184 | Int = Intrinsic::aarch64_neon_sminv; |
11185 | Ty = Int32Ty; |
11186 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11187 | llvm::Type *Tys[2] = { Ty, VTy }; |
11188 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11189 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11190 | return Builder.CreateTrunc(Ops[0], Int8Ty); |
11191 | } |
11192 | case NEON::BI__builtin_neon_vminvq_s16: { |
11193 | Int = Intrinsic::aarch64_neon_sminv; |
11194 | Ty = Int32Ty; |
11195 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11196 | llvm::Type *Tys[2] = { Ty, VTy }; |
11197 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11198 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11199 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11200 | } |
11201 | case NEON::BI__builtin_neon_vminv_f16: { |
11202 | Int = Intrinsic::aarch64_neon_fminv; |
11203 | Ty = HalfTy; |
11204 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11205 | llvm::Type *Tys[2] = { Ty, VTy }; |
11206 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11207 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11208 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11209 | } |
11210 | case NEON::BI__builtin_neon_vminvq_f16: { |
11211 | Int = Intrinsic::aarch64_neon_fminv; |
11212 | Ty = HalfTy; |
11213 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11214 | llvm::Type *Tys[2] = { Ty, VTy }; |
11215 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11216 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); |
11217 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11218 | } |
11219 | case NEON::BI__builtin_neon_vmaxnmv_f16: { |
11220 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11221 | Ty = HalfTy; |
11222 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11223 | llvm::Type *Tys[2] = { Ty, VTy }; |
11224 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11225 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11226 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11227 | } |
11228 | case NEON::BI__builtin_neon_vmaxnmvq_f16: { |
11229 | Int = Intrinsic::aarch64_neon_fmaxnmv; |
11230 | Ty = HalfTy; |
11231 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11232 | llvm::Type *Tys[2] = { Ty, VTy }; |
11233 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11234 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); |
11235 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11236 | } |
11237 | case NEON::BI__builtin_neon_vminnmv_f16: { |
11238 | Int = Intrinsic::aarch64_neon_fminnmv; |
11239 | Ty = HalfTy; |
11240 | VTy = llvm::FixedVectorType::get(HalfTy, 4); |
11241 | llvm::Type *Tys[2] = { Ty, VTy }; |
11242 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11243 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11244 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11245 | } |
11246 | case NEON::BI__builtin_neon_vminnmvq_f16: { |
11247 | Int = Intrinsic::aarch64_neon_fminnmv; |
11248 | Ty = HalfTy; |
11249 | VTy = llvm::FixedVectorType::get(HalfTy, 8); |
11250 | llvm::Type *Tys[2] = { Ty, VTy }; |
11251 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11252 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); |
11253 | return Builder.CreateTrunc(Ops[0], HalfTy); |
11254 | } |
11255 | case NEON::BI__builtin_neon_vmul_n_f64: { |
11256 | Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); |
11257 | Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); |
11258 | return Builder.CreateFMul(Ops[0], RHS); |
11259 | } |
11260 | case NEON::BI__builtin_neon_vaddlv_u8: { |
11261 | Int = Intrinsic::aarch64_neon_uaddlv; |
11262 | Ty = Int32Ty; |
11263 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11264 | llvm::Type *Tys[2] = { Ty, VTy }; |
11265 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11266 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11267 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11268 | } |
11269 | case NEON::BI__builtin_neon_vaddlv_u16: { |
11270 | Int = Intrinsic::aarch64_neon_uaddlv; |
11271 | Ty = Int32Ty; |
11272 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11273 | llvm::Type *Tys[2] = { Ty, VTy }; |
11274 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11275 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11276 | } |
11277 | case NEON::BI__builtin_neon_vaddlvq_u8: { |
11278 | Int = Intrinsic::aarch64_neon_uaddlv; |
11279 | Ty = Int32Ty; |
11280 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11281 | llvm::Type *Tys[2] = { Ty, VTy }; |
11282 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11283 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11284 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11285 | } |
11286 | case NEON::BI__builtin_neon_vaddlvq_u16: { |
11287 | Int = Intrinsic::aarch64_neon_uaddlv; |
11288 | Ty = Int32Ty; |
11289 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11290 | llvm::Type *Tys[2] = { Ty, VTy }; |
11291 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11292 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11293 | } |
11294 | case NEON::BI__builtin_neon_vaddlv_s8: { |
11295 | Int = Intrinsic::aarch64_neon_saddlv; |
11296 | Ty = Int32Ty; |
11297 | VTy = llvm::FixedVectorType::get(Int8Ty, 8); |
11298 | llvm::Type *Tys[2] = { Ty, VTy }; |
11299 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11300 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11301 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11302 | } |
11303 | case NEON::BI__builtin_neon_vaddlv_s16: { |
11304 | Int = Intrinsic::aarch64_neon_saddlv; |
11305 | Ty = Int32Ty; |
11306 | VTy = llvm::FixedVectorType::get(Int16Ty, 4); |
11307 | llvm::Type *Tys[2] = { Ty, VTy }; |
11308 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11309 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11310 | } |
11311 | case NEON::BI__builtin_neon_vaddlvq_s8: { |
11312 | Int = Intrinsic::aarch64_neon_saddlv; |
11313 | Ty = Int32Ty; |
11314 | VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
11315 | llvm::Type *Tys[2] = { Ty, VTy }; |
11316 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11317 | Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11318 | return Builder.CreateTrunc(Ops[0], Int16Ty); |
11319 | } |
11320 | case NEON::BI__builtin_neon_vaddlvq_s16: { |
11321 | Int = Intrinsic::aarch64_neon_saddlv; |
11322 | Ty = Int32Ty; |
11323 | VTy = llvm::FixedVectorType::get(Int16Ty, 8); |
11324 | llvm::Type *Tys[2] = { Ty, VTy }; |
11325 | Ops.push_back(EmitScalarExpr(E->getArg(0))); |
11326 | return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); |
11327 | } |
11328 | case NEON::BI__builtin_neon_vsri_n_v: |
11329 | case NEON::BI__builtin_neon_vsriq_n_v: { |
11330 | Int = Intrinsic::aarch64_neon_vsri; |
11331 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11332 | return EmitNeonCall(Intrin, Ops, "vsri_n"); |
11333 | } |
11334 | case NEON::BI__builtin_neon_vsli_n_v: |
11335 | case NEON::BI__builtin_neon_vsliq_n_v: { |
11336 | Int = Intrinsic::aarch64_neon_vsli; |
11337 | llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty); |
11338 | return EmitNeonCall(Intrin, Ops, "vsli_n"); |
11339 | } |
11340 | case NEON::BI__builtin_neon_vsra_n_v: |
11341 | case NEON::BI__builtin_neon_vsraq_n_v: |
11342 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11343 | Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n"); |
11344 | return Builder.CreateAdd(Ops[0], Ops[1]); |
11345 | case NEON::BI__builtin_neon_vrsra_n_v: |
11346 | case NEON::BI__builtin_neon_vrsraq_n_v: { |
11347 | Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; |
11348 | SmallVector<llvm::Value*,2> TmpOps; |
11349 | TmpOps.push_back(Ops[1]); |
11350 | TmpOps.push_back(Ops[2]); |
11351 | Function* F = CGM.getIntrinsic(Int, Ty); |
11352 | llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); |
11353 | Ops[0] = Builder.CreateBitCast(Ops[0], VTy); |
11354 | return Builder.CreateAdd(Ops[0], tmp); |
11355 | } |
11356 | case NEON::BI__builtin_neon_vld1_v: |
11357 | case NEON::BI__builtin_neon_vld1q_v: { |
11358 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11359 | return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment()); |
11360 | } |
11361 | case NEON::BI__builtin_neon_vst1_v: |
11362 | case NEON::BI__builtin_neon_vst1q_v: |
11363 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy)); |
11364 | Ops[1] = Builder.CreateBitCast(Ops[1], VTy); |
11365 | return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment()); |
11366 | case NEON::BI__builtin_neon_vld1_lane_v: |
11367 | case NEON::BI__builtin_neon_vld1q_lane_v: { |
11368 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11369 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11370 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11371 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11372 | PtrOp0.getAlignment()); |
11373 | return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); |
11374 | } |
11375 | case NEON::BI__builtin_neon_vld1_dup_v: |
11376 | case NEON::BI__builtin_neon_vld1q_dup_v: { |
11377 | Value *V = UndefValue::get(Ty); |
11378 | Ty = llvm::PointerType::getUnqual(VTy->getElementType()); |
11379 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11380 | Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], |
11381 | PtrOp0.getAlignment()); |
11382 | llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); |
11383 | Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); |
11384 | return EmitNeonSplat(Ops[0], CI); |
11385 | } |
11386 | case NEON::BI__builtin_neon_vst1_lane_v: |
11387 | case NEON::BI__builtin_neon_vst1q_lane_v: |
11388 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11389 | Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); |
11390 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11391 | return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty), |
11392 | PtrOp0.getAlignment()); |
11393 | case NEON::BI__builtin_neon_vld2_v: |
11394 | case NEON::BI__builtin_neon_vld2q_v: { |
11395 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11396 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11397 | llvm::Type *Tys[2] = { VTy, PTy }; |
11398 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); |
11399 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11400 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11401 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11402 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11403 | } |
11404 | case NEON::BI__builtin_neon_vld3_v: |
11405 | case NEON::BI__builtin_neon_vld3q_v: { |
11406 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11407 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11408 | llvm::Type *Tys[2] = { VTy, PTy }; |
11409 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); |
11410 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11411 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11412 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11413 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11414 | } |
11415 | case NEON::BI__builtin_neon_vld4_v: |
11416 | case NEON::BI__builtin_neon_vld4q_v: { |
11417 | llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); |
11418 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11419 | llvm::Type *Tys[2] = { VTy, PTy }; |
11420 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); |
11421 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11422 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11423 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11424 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11425 | } |
11426 | case NEON::BI__builtin_neon_vld2_dup_v: |
11427 | case NEON::BI__builtin_neon_vld2q_dup_v: { |
11428 | llvm::Type *PTy = |
11429 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11430 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11431 | llvm::Type *Tys[2] = { VTy, PTy }; |
11432 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); |
11433 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); |
11434 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11435 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11436 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11437 | } |
11438 | case NEON::BI__builtin_neon_vld3_dup_v: |
11439 | case NEON::BI__builtin_neon_vld3q_dup_v: { |
11440 | llvm::Type *PTy = |
11441 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11442 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11443 | llvm::Type *Tys[2] = { VTy, PTy }; |
11444 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); |
11445 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); |
11446 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11447 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11448 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11449 | } |
11450 | case NEON::BI__builtin_neon_vld4_dup_v: |
11451 | case NEON::BI__builtin_neon_vld4q_dup_v: { |
11452 | llvm::Type *PTy = |
11453 | llvm::PointerType::getUnqual(VTy->getElementType()); |
11454 | Ops[1] = Builder.CreateBitCast(Ops[1], PTy); |
11455 | llvm::Type *Tys[2] = { VTy, PTy }; |
11456 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); |
11457 | Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); |
11458 | Ops[0] = Builder.CreateBitCast(Ops[0], |
11459 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11460 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11461 | } |
11462 | case NEON::BI__builtin_neon_vld2_lane_v: |
11463 | case NEON::BI__builtin_neon_vld2q_lane_v: { |
11464 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11465 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); |
11466 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11467 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11468 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11469 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11470 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane"); |
11471 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11472 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11473 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11474 | } |
11475 | case NEON::BI__builtin_neon_vld3_lane_v: |
11476 | case NEON::BI__builtin_neon_vld3q_lane_v: { |
11477 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11478 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); |
11479 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11480 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11481 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11482 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11483 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11484 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane"); |
11485 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11486 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11487 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11488 | } |
11489 | case NEON::BI__builtin_neon_vld4_lane_v: |
11490 | case NEON::BI__builtin_neon_vld4q_lane_v: { |
11491 | llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; |
11492 | Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); |
11493 | std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); |
11494 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11495 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11496 | Ops[3] = Builder.CreateBitCast(Ops[3], Ty); |
11497 | Ops[4] = Builder.CreateBitCast(Ops[4], Ty); |
11498 | Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty); |
11499 | Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane"); |
11500 | Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); |
11501 | Ops[0] = Builder.CreateBitCast(Ops[0], Ty); |
11502 | return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); |
11503 | } |
11504 | case NEON::BI__builtin_neon_vst2_v: |
11505 | case NEON::BI__builtin_neon_vst2q_v: { |
11506 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11507 | llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; |
11508 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), |
11509 | Ops, ""); |
11510 | } |
11511 | case NEON::BI__builtin_neon_vst2_lane_v: |
11512 | case NEON::BI__builtin_neon_vst2q_lane_v: { |
11513 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11514 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
11515 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11516 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), |
11517 | Ops, ""); |
11518 | } |
11519 | case NEON::BI__builtin_neon_vst3_v: |
11520 | case NEON::BI__builtin_neon_vst3q_v: { |
11521 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11522 | llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; |
11523 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), |
11524 | Ops, ""); |
11525 | } |
11526 | case NEON::BI__builtin_neon_vst3_lane_v: |
11527 | case NEON::BI__builtin_neon_vst3q_lane_v: { |
11528 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11529 | Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); |
11530 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11531 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), |
11532 | Ops, ""); |
11533 | } |
11534 | case NEON::BI__builtin_neon_vst4_v: |
11535 | case NEON::BI__builtin_neon_vst4q_v: { |
11536 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11537 | llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; |
11538 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), |
11539 | Ops, ""); |
11540 | } |
11541 | case NEON::BI__builtin_neon_vst4_lane_v: |
11542 | case NEON::BI__builtin_neon_vst4q_lane_v: { |
11543 | std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); |
11544 | Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); |
11545 | llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; |
11546 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), |
11547 | Ops, ""); |
11548 | } |
11549 | case NEON::BI__builtin_neon_vtrn_v: |
11550 | case NEON::BI__builtin_neon_vtrnq_v: { |
11551 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11552 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11553 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11554 | Value *SV = nullptr; |
11555 | |
11556 | for (unsigned vi = 0; vi != 2; ++vi) { |
11557 | SmallVector<int, 16> Indices; |
11558 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11559 | Indices.push_back(i+vi); |
11560 | Indices.push_back(i+e+vi); |
11561 | } |
11562 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11563 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); |
11564 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11565 | } |
11566 | return SV; |
11567 | } |
11568 | case NEON::BI__builtin_neon_vuzp_v: |
11569 | case NEON::BI__builtin_neon_vuzpq_v: { |
11570 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11571 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11572 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11573 | Value *SV = nullptr; |
11574 | |
11575 | for (unsigned vi = 0; vi != 2; ++vi) { |
11576 | SmallVector<int, 16> Indices; |
11577 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) |
11578 | Indices.push_back(2*i+vi); |
11579 | |
11580 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11581 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); |
11582 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11583 | } |
11584 | return SV; |
11585 | } |
11586 | case NEON::BI__builtin_neon_vzip_v: |
11587 | case NEON::BI__builtin_neon_vzipq_v: { |
11588 | Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11589 | Ops[1] = Builder.CreateBitCast(Ops[1], Ty); |
11590 | Ops[2] = Builder.CreateBitCast(Ops[2], Ty); |
11591 | Value *SV = nullptr; |
11592 | |
11593 | for (unsigned vi = 0; vi != 2; ++vi) { |
11594 | SmallVector<int, 16> Indices; |
11595 | for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { |
11596 | Indices.push_back((i + vi*e) >> 1); |
11597 | Indices.push_back(((i + vi*e) >> 1)+e); |
11598 | } |
11599 | Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); |
11600 | SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); |
11601 | SV = Builder.CreateDefaultAlignedStore(SV, Addr); |
11602 | } |
11603 | return SV; |
11604 | } |
11605 | case NEON::BI__builtin_neon_vqtbl1q_v: { |
11606 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), |
11607 | Ops, "vtbl1"); |
11608 | } |
11609 | case NEON::BI__builtin_neon_vqtbl2q_v: { |
11610 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), |
11611 | Ops, "vtbl2"); |
11612 | } |
11613 | case NEON::BI__builtin_neon_vqtbl3q_v: { |
11614 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), |
11615 | Ops, "vtbl3"); |
11616 | } |
11617 | case NEON::BI__builtin_neon_vqtbl4q_v: { |
11618 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), |
11619 | Ops, "vtbl4"); |
11620 | } |
11621 | case NEON::BI__builtin_neon_vqtbx1q_v: { |
11622 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), |
11623 | Ops, "vtbx1"); |
11624 | } |
11625 | case NEON::BI__builtin_neon_vqtbx2q_v: { |
11626 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), |
11627 | Ops, "vtbx2"); |
11628 | } |
11629 | case NEON::BI__builtin_neon_vqtbx3q_v: { |
11630 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), |
11631 | Ops, "vtbx3"); |
11632 | } |
11633 | case NEON::BI__builtin_neon_vqtbx4q_v: { |
11634 | return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), |
11635 | Ops, "vtbx4"); |
11636 | } |
11637 | case NEON::BI__builtin_neon_vsqadd_v: |
11638 | case NEON::BI__builtin_neon_vsqaddq_v: { |
11639 | Int = Intrinsic::aarch64_neon_usqadd; |
11640 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd"); |
11641 | } |
11642 | case NEON::BI__builtin_neon_vuqadd_v: |
11643 | case NEON::BI__builtin_neon_vuqaddq_v: { |
11644 | Int = Intrinsic::aarch64_neon_suqadd; |
11645 | return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd"); |
11646 | } |
11647 | } |
11648 | } |
11649 | |
11650 | Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID, |
11651 | const CallExpr *E) { |
11652 | assert((BuiltinID == BPF::BI__builtin_preserve_field_info || |
11653 | BuiltinID == BPF::BI__builtin_btf_type_id || |
11654 | BuiltinID == BPF::BI__builtin_preserve_type_info || |
11655 | BuiltinID == BPF::BI__builtin_preserve_enum_value) && |
11656 | "unexpected BPF builtin"); |
11657 | |
11658 | |
11659 | |
11660 | |
11661 | static uint32_t BuiltinSeqNum; |
11662 | |
11663 | switch (BuiltinID) { |
11664 | default: |
11665 | llvm_unreachable("Unexpected BPF builtin"); |
11666 | case BPF::BI__builtin_preserve_field_info: { |
11667 | const Expr *Arg = E->getArg(0); |
11668 | bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField; |
11669 | |
11670 | if (!getDebugInfo()) { |
11671 | CGM.Error(E->getExprLoc(), |
11672 | "using __builtin_preserve_field_info() without -g"); |
11673 | return IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11674 | : EmitLValue(Arg).getPointer(*this); |
11675 | } |
11676 | |
11677 | |
11678 | bool OldIsInPreservedAIRegion = IsInPreservedAIRegion; |
11679 | IsInPreservedAIRegion = true; |
11680 | Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer() |
11681 | : EmitLValue(Arg).getPointer(*this); |
11682 | IsInPreservedAIRegion = OldIsInPreservedAIRegion; |
11683 | |
11684 | ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11685 | Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue()); |
11686 | |
11687 | |
11688 | llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration( |
11689 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info, |
11690 | {FieldAddr->getType()}); |
11691 | return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind}); |
11692 | } |
11693 | case BPF::BI__builtin_btf_type_id: |
11694 | case BPF::BI__builtin_preserve_type_info: { |
11695 | if (!getDebugInfo()) { |
11696 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
11697 | return nullptr; |
11698 | } |
11699 | |
11700 | const Expr *Arg0 = E->getArg(0); |
11701 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
11702 | Arg0->getType(), Arg0->getExprLoc()); |
11703 | |
11704 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11705 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
11706 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
11707 | |
11708 | llvm::Function *FnDecl; |
11709 | if (BuiltinID == BPF::BI__builtin_btf_type_id) |
11710 | FnDecl = llvm::Intrinsic::getDeclaration( |
11711 | &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {}); |
11712 | else |
11713 | FnDecl = llvm::Intrinsic::getDeclaration( |
11714 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {}); |
11715 | CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue}); |
11716 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
11717 | return Fn; |
11718 | } |
11719 | case BPF::BI__builtin_preserve_enum_value: { |
11720 | if (!getDebugInfo()) { |
11721 | CGM.Error(E->getExprLoc(), "using builtin function without -g"); |
11722 | return nullptr; |
11723 | } |
11724 | |
11725 | const Expr *Arg0 = E->getArg(0); |
11726 | llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( |
11727 | Arg0->getType(), Arg0->getExprLoc()); |
11728 | |
11729 | |
11730 | const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens()); |
11731 | const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr()); |
11732 | const auto *DR = cast<DeclRefExpr>(CE->getSubExpr()); |
11733 | const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl()); |
11734 | |
11735 | auto &InitVal = Enumerator->getInitVal(); |
11736 | std::string InitValStr; |
11737 | if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX)) |
11738 | InitValStr = std::to_string(InitVal.getSExtValue()); |
11739 | else |
11740 | InitValStr = std::to_string(InitVal.getZExtValue()); |
11741 | std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr; |
11742 | Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr); |
11743 | |
11744 | ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); |
11745 | Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); |
11746 | Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++); |
11747 | |
11748 | llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration( |
11749 | &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {}); |
11750 | CallInst *Fn = |
11751 | Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue}); |
11752 | Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); |
11753 | return Fn; |
11754 | } |
11755 | } |
11756 | } |
11757 | |
11758 | llvm::Value *CodeGenFunction:: |
11759 | BuildVector(ArrayRef<llvm::Value*> Ops) { |
11760 | assert((Ops.size() & (Ops.size() - 1)) == 0 && |
11761 | "Not a power-of-two sized vector!"); |
11762 | bool AllConstants = true; |
11763 | for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) |
11764 | AllConstants &= isa<Constant>(Ops[i]); |
11765 | |
11766 | |
11767 | if (AllConstants) { |
11768 | SmallVector<llvm::Constant*, 16> CstOps; |
11769 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
11770 | CstOps.push_back(cast<Constant>(Ops[i])); |
11771 | return llvm::ConstantVector::get(CstOps); |
11772 | } |
11773 | |
11774 | |
11775 | Value *Result = llvm::UndefValue::get( |
11776 | llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size())); |
11777 | |
11778 | for (unsigned i = 0, e = Ops.size(); i != e; ++i) |
11779 | Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i)); |
11780 | |
11781 | return Result; |
11782 | } |
11783 | |
11784 | |
11785 | static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask, |
11786 | unsigned NumElts) { |
11787 | |
11788 | auto *MaskTy = llvm::FixedVectorType::get( |
11789 | CGF.Builder.getInt1Ty(), |
11790 | cast<IntegerType>(Mask->getType())->getBitWidth()); |
11791 | Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy); |
11792 | |
11793 | |
11794 | |
11795 | if (NumElts < 8) { |
11796 | int Indices[4]; |
11797 | for (unsigned i = 0; i != NumElts; ++i) |
11798 | Indices[i] = i; |
11799 | MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, |
11800 | makeArrayRef(Indices, NumElts), |
11801 | "extract"); |
11802 | } |
11803 | return MaskVec; |
11804 | } |
11805 | |
11806 | static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11807 | Align Alignment) { |
11808 | |
11809 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11810 | llvm::PointerType::getUnqual(Ops[1]->getType())); |
11811 | |
11812 | Value *MaskVec = getMaskVecValue( |
11813 | CGF, Ops[2], |
11814 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements()); |
11815 | |
11816 | return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec); |
11817 | } |
11818 | |
11819 | static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11820 | Align Alignment) { |
11821 | |
11822 | llvm::Type *Ty = Ops[1]->getType(); |
11823 | Value *Ptr = |
11824 | CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); |
11825 | |
11826 | Value *MaskVec = getMaskVecValue( |
11827 | CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements()); |
11828 | |
11829 | return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]); |
11830 | } |
11831 | |
11832 | static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, |
11833 | ArrayRef<Value *> Ops) { |
11834 | auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType()); |
11835 | llvm::Type *PtrTy = ResultTy->getElementType(); |
11836 | |
11837 | |
11838 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11839 | llvm::PointerType::getUnqual(PtrTy)); |
11840 | |
11841 | Value *MaskVec = getMaskVecValue( |
11842 | CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements()); |
11843 | |
11844 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, |
11845 | ResultTy); |
11846 | return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); |
11847 | } |
11848 | |
11849 | static Value *EmitX86CompressExpand(CodeGenFunction &CGF, |
11850 | ArrayRef<Value *> Ops, |
11851 | bool IsCompress) { |
11852 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
11853 | |
11854 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
11855 | |
11856 | Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress |
11857 | : Intrinsic::x86_avx512_mask_expand; |
11858 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy); |
11859 | return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec }); |
11860 | } |
11861 | |
11862 | static Value *EmitX86CompressStore(CodeGenFunction &CGF, |
11863 | ArrayRef<Value *> Ops) { |
11864 | auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); |
11865 | llvm::Type *PtrTy = ResultTy->getElementType(); |
11866 | |
11867 | |
11868 | Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], |
11869 | llvm::PointerType::getUnqual(PtrTy)); |
11870 | |
11871 | Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); |
11872 | |
11873 | llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, |
11874 | ResultTy); |
11875 | return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); |
11876 | } |
11877 | |
11878 | static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, |
11879 | ArrayRef<Value *> Ops, |
11880 | bool InvertLHS = false) { |
11881 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
11882 | Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); |
11883 | Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); |
11884 | |
11885 | if (InvertLHS) |
11886 | LHS = CGF.Builder.CreateNot(LHS); |
11887 | |
11888 | return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS), |
11889 | Ops[0]->getType()); |
11890 | } |
11891 | |
11892 | static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1, |
11893 | Value *Amt, bool IsRight) { |
11894 | llvm::Type *Ty = Op0->getType(); |
11895 | |
11896 | |
11897 | |
11898 | |
11899 | if (Amt->getType() != Ty) { |
11900 | unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements(); |
11901 | Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); |
11902 | Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt); |
11903 | } |
11904 | |
11905 | unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl; |
11906 | Function *F = CGF.CGM.getIntrinsic(IID, Ty); |
11907 | return CGF.Builder.CreateCall(F, {Op0, Op1, Amt}); |
11908 | } |
11909 | |
11910 | static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops, |
11911 | bool IsSigned) { |
11912 | Value *Op0 = Ops[0]; |
11913 | Value *Op1 = Ops[1]; |
11914 | llvm::Type *Ty = Op0->getType(); |
11915 | uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
11916 | |
11917 | CmpInst::Predicate Pred; |
11918 | switch (Imm) { |
11919 | case 0x0: |
11920 | Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; |
11921 | break; |
11922 | case 0x1: |
11923 | Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; |
11924 | break; |
11925 | case 0x2: |
11926 | Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; |
11927 | break; |
11928 | case 0x3: |
11929 | Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; |
11930 | break; |
11931 | case 0x4: |
11932 | Pred = ICmpInst::ICMP_EQ; |
11933 | break; |
11934 | case 0x5: |
11935 | Pred = ICmpInst::ICMP_NE; |
11936 | break; |
11937 | case 0x6: |
11938 | return llvm::Constant::getNullValue(Ty); |
11939 | case 0x7: |
11940 | return llvm::Constant::getAllOnesValue(Ty); |
11941 | default: |
11942 | llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate"); |
11943 | } |
11944 | |
11945 | Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1); |
11946 | Value *Res = CGF.Builder.CreateSExt(Cmp, Ty); |
11947 | return Res; |
11948 | } |
11949 | |
11950 | static Value *EmitX86Select(CodeGenFunction &CGF, |
11951 | Value *Mask, Value *Op0, Value *Op1) { |
11952 | |
11953 | |
11954 | if (const auto *C = dyn_cast<Constant>(Mask)) |
11955 | if (C->isAllOnesValue()) |
11956 | return Op0; |
11957 | |
11958 | Mask = getMaskVecValue( |
11959 | CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements()); |
11960 | |
11961 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
11962 | } |
11963 | |
11964 | static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, |
11965 | Value *Mask, Value *Op0, Value *Op1) { |
11966 | |
11967 | if (const auto *C = dyn_cast<Constant>(Mask)) |
11968 | if (C->isAllOnesValue()) |
11969 | return Op0; |
11970 | |
11971 | auto *MaskTy = llvm::FixedVectorType::get( |
11972 | CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth()); |
11973 | Mask = CGF.Builder.CreateBitCast(Mask, MaskTy); |
11974 | Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0); |
11975 | return CGF.Builder.CreateSelect(Mask, Op0, Op1); |
11976 | } |
11977 | |
11978 | static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp, |
11979 | unsigned NumElts, Value *MaskIn) { |
11980 | if (MaskIn) { |
11981 | const auto *C = dyn_cast<Constant>(MaskIn); |
11982 | if (!C || !C->isAllOnesValue()) |
11983 | Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts)); |
11984 | } |
11985 | |
11986 | if (NumElts < 8) { |
11987 | int Indices[8]; |
11988 | for (unsigned i = 0; i != NumElts; ++i) |
11989 | Indices[i] = i; |
11990 | for (unsigned i = NumElts; i != 8; ++i) |
11991 | Indices[i] = i % NumElts + NumElts; |
11992 | Cmp = CGF.Builder.CreateShuffleVector( |
11993 | Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); |
11994 | } |
11995 | |
11996 | return CGF.Builder.CreateBitCast(Cmp, |
11997 | IntegerType::get(CGF.getLLVMContext(), |
11998 | std::max(NumElts, 8U))); |
11999 | } |
12000 | |
12001 | static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, |
12002 | bool Signed, ArrayRef<Value *> Ops) { |
12003 | assert((Ops.size() == 2 || Ops.size() == 4) && |
12004 | "Unexpected number of arguments"); |
12005 | unsigned NumElts = |
12006 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12007 | Value *Cmp; |
12008 | |
12009 | if (CC == 3) { |
12010 | Cmp = Constant::getNullValue( |
12011 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
12012 | } else if (CC == 7) { |
12013 | Cmp = Constant::getAllOnesValue( |
12014 | llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts)); |
12015 | } else { |
12016 | ICmpInst::Predicate Pred; |
12017 | switch (CC) { |
12018 | default: llvm_unreachable("Unknown condition code"); |
12019 | case 0: Pred = ICmpInst::ICMP_EQ; break; |
12020 | case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; |
12021 | case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; |
12022 | case 4: Pred = ICmpInst::ICMP_NE; break; |
12023 | case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; |
12024 | case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; |
12025 | } |
12026 | Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); |
12027 | } |
12028 | |
12029 | Value *MaskIn = nullptr; |
12030 | if (Ops.size() == 4) |
12031 | MaskIn = Ops[3]; |
12032 | |
12033 | return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn); |
12034 | } |
12035 | |
12036 | static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { |
12037 | Value *Zero = Constant::getNullValue(In->getType()); |
12038 | return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); |
12039 | } |
12040 | |
12041 | static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E, |
12042 | ArrayRef<Value *> Ops, bool IsSigned) { |
12043 | unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); |
12044 | llvm::Type *Ty = Ops[1]->getType(); |
12045 | |
12046 | Value *Res; |
12047 | if (Rnd != 4) { |
12048 | Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round |
12049 | : Intrinsic::x86_avx512_uitofp_round; |
12050 | Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); |
12051 | Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] }); |
12052 | } else { |
12053 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12054 | Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty) |
12055 | : CGF.Builder.CreateUIToFP(Ops[0], Ty); |
12056 | } |
12057 | |
12058 | return EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12059 | } |
12060 | |
12061 | |
12062 | static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12063 | ArrayRef<Value *> Ops, unsigned BuiltinID, |
12064 | bool IsAddSub) { |
12065 | |
12066 | bool Subtract = false; |
12067 | Intrinsic::ID IID = Intrinsic::not_intrinsic; |
12068 | switch (BuiltinID) { |
12069 | default: break; |
12070 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12071 | Subtract = true; |
12072 | LLVM_FALLTHROUGH; |
12073 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12074 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12075 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12076 | IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; |
12077 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12078 | Subtract = true; |
12079 | LLVM_FALLTHROUGH; |
12080 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12081 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12082 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12083 | IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; |
12084 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12085 | Subtract = true; |
12086 | LLVM_FALLTHROUGH; |
12087 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12088 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12089 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12090 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512; |
12091 | break; |
12092 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12093 | Subtract = true; |
12094 | LLVM_FALLTHROUGH; |
12095 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12096 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12097 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12098 | IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512; |
12099 | break; |
12100 | } |
12101 | |
12102 | Value *A = Ops[0]; |
12103 | Value *B = Ops[1]; |
12104 | Value *C = Ops[2]; |
12105 | |
12106 | if (Subtract) |
12107 | C = CGF.Builder.CreateFNeg(C); |
12108 | |
12109 | Value *Res; |
12110 | |
12111 | |
12112 | if (IID != Intrinsic::not_intrinsic && |
12113 | (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 || |
12114 | IsAddSub)) { |
12115 | Function *Intr = CGF.CGM.getIntrinsic(IID); |
12116 | Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); |
12117 | } else { |
12118 | llvm::Type *Ty = A->getType(); |
12119 | Function *FMA; |
12120 | if (CGF.Builder.getIsFPConstrained()) { |
12121 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12122 | FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty); |
12123 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C}); |
12124 | } else { |
12125 | FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty); |
12126 | Res = CGF.Builder.CreateCall(FMA, {A, B, C}); |
12127 | } |
12128 | } |
12129 | |
12130 | |
12131 | Value *MaskFalseVal = nullptr; |
12132 | switch (BuiltinID) { |
12133 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask: |
12134 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: |
12135 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12136 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12137 | MaskFalseVal = Ops[0]; |
12138 | break; |
12139 | case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: |
12140 | case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12141 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12142 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12143 | MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); |
12144 | break; |
12145 | case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: |
12146 | case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: |
12147 | case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12148 | case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12149 | case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12150 | case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12151 | case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12152 | case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12153 | MaskFalseVal = Ops[2]; |
12154 | break; |
12155 | } |
12156 | |
12157 | if (MaskFalseVal) |
12158 | return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal); |
12159 | |
12160 | return Res; |
12161 | } |
12162 | |
12163 | static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E, |
12164 | MutableArrayRef<Value *> Ops, Value *Upper, |
12165 | bool ZeroMask = false, unsigned PTIdx = 0, |
12166 | bool NegAcc = false) { |
12167 | unsigned Rnd = 4; |
12168 | if (Ops.size() > 4) |
12169 | Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
12170 | |
12171 | if (NegAcc) |
12172 | Ops[2] = CGF.Builder.CreateFNeg(Ops[2]); |
12173 | |
12174 | Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
12175 | Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
12176 | Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
12177 | Value *Res; |
12178 | if (Rnd != 4) { |
12179 | Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ? |
12180 | Intrinsic::x86_avx512_vfmadd_f32 : |
12181 | Intrinsic::x86_avx512_vfmadd_f64; |
12182 | Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12183 | {Ops[0], Ops[1], Ops[2], Ops[4]}); |
12184 | } else if (CGF.Builder.getIsFPConstrained()) { |
12185 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); |
12186 | Function *FMA = CGF.CGM.getIntrinsic( |
12187 | Intrinsic::experimental_constrained_fma, Ops[0]->getType()); |
12188 | Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3)); |
12189 | } else { |
12190 | Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); |
12191 | Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3)); |
12192 | } |
12193 | |
12194 | if (Ops.size() > 3) { |
12195 | Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) |
12196 | : Ops[PTIdx]; |
12197 | |
12198 | |
12199 | |
12200 | |
12201 | if (NegAcc && PTIdx == 2) |
12202 | PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0); |
12203 | |
12204 | Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru); |
12205 | } |
12206 | return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0); |
12207 | } |
12208 | |
12209 | static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned, |
12210 | ArrayRef<Value *> Ops) { |
12211 | llvm::Type *Ty = Ops[0]->getType(); |
12212 | |
12213 | Ty = llvm::FixedVectorType::get(CGF.Int64Ty, |
12214 | Ty->getPrimitiveSizeInBits() / 64); |
12215 | Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty); |
12216 | Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty); |
12217 | |
12218 | if (IsSigned) { |
12219 | |
12220 | Constant *ShiftAmt = ConstantInt::get(Ty, 32); |
12221 | LHS = CGF.Builder.CreateShl(LHS, ShiftAmt); |
12222 | LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt); |
12223 | RHS = CGF.Builder.CreateShl(RHS, ShiftAmt); |
12224 | RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt); |
12225 | } else { |
12226 | |
12227 | Constant *Mask = ConstantInt::get(Ty, 0xffffffff); |
12228 | LHS = CGF.Builder.CreateAnd(LHS, Mask); |
12229 | RHS = CGF.Builder.CreateAnd(RHS, Mask); |
12230 | } |
12231 | |
12232 | return CGF.Builder.CreateMul(LHS, RHS); |
12233 | } |
12234 | |
12235 | |
12236 | |
12237 | |
12238 | static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask, |
12239 | ArrayRef<Value *> Ops) { |
12240 | llvm::Type *Ty = Ops[0]->getType(); |
12241 | |
12242 | unsigned VecWidth = Ty->getPrimitiveSizeInBits(); |
12243 | unsigned EltWidth = Ty->getScalarSizeInBits(); |
12244 | Intrinsic::ID IID; |
12245 | if (VecWidth == 128 && EltWidth == 32) |
12246 | IID = Intrinsic::x86_avx512_pternlog_d_128; |
12247 | else if (VecWidth == 256 && EltWidth == 32) |
12248 | IID = Intrinsic::x86_avx512_pternlog_d_256; |
12249 | else if (VecWidth == 512 && EltWidth == 32) |
12250 | IID = Intrinsic::x86_avx512_pternlog_d_512; |
12251 | else if (VecWidth == 128 && EltWidth == 64) |
12252 | IID = Intrinsic::x86_avx512_pternlog_q_128; |
12253 | else if (VecWidth == 256 && EltWidth == 64) |
12254 | IID = Intrinsic::x86_avx512_pternlog_q_256; |
12255 | else if (VecWidth == 512 && EltWidth == 64) |
12256 | IID = Intrinsic::x86_avx512_pternlog_q_512; |
12257 | else |
12258 | llvm_unreachable("Unexpected intrinsic"); |
12259 | |
12260 | Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), |
12261 | Ops.drop_back()); |
12262 | Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; |
12263 | return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); |
12264 | } |
12265 | |
12266 | static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, |
12267 | llvm::Type *DstTy) { |
12268 | unsigned NumberOfElements = |
12269 | cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12270 | Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); |
12271 | return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); |
12272 | } |
12273 | |
12274 | |
12275 | static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF, |
12276 | ArrayRef<Value *> Ops, Intrinsic::ID IID) { |
12277 | llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType()); |
12278 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]}); |
12279 | } |
12280 | |
12281 | Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { |
12282 | const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); |
12283 | StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); |
12284 | return EmitX86CpuIs(CPUStr); |
12285 | } |
12286 | |
12287 | |
12288 | static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF, |
12289 | ArrayRef<Value *> Ops, |
12290 | llvm::Type *DstTy) { |
12291 | assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) && |
12292 | "Unknown cvtph2ps intrinsic"); |
12293 | |
12294 | |
12295 | if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) { |
12296 | Function *F = |
12297 | CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512); |
12298 | return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]}); |
12299 | } |
12300 | |
12301 | unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); |
12302 | Value *Src = Ops[0]; |
12303 | |
12304 | |
12305 | if (NumDstElts != |
12306 | cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) { |
12307 | assert(NumDstElts == 4 && "Unexpected vector size"); |
12308 | Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3}); |
12309 | } |
12310 | |
12311 | |
12312 | auto *HalfTy = llvm::FixedVectorType::get( |
12313 | llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts); |
12314 | Src = CGF.Builder.CreateBitCast(Src, HalfTy); |
12315 | |
12316 | |
12317 | Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps"); |
12318 | |
12319 | if (Ops.size() >= 3) |
12320 | Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]); |
12321 | return Res; |
12322 | } |
12323 | |
12324 | |
12325 | static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF, |
12326 | const CallExpr *E, |
12327 | ArrayRef<Value *> Ops) { |
12328 | llvm::Type *Int32Ty = CGF.Builder.getInt32Ty(); |
12329 | Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty); |
12330 | Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16); |
12331 | llvm::Type *ResultType = CGF.ConvertType(E->getType()); |
12332 | Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType); |
12333 | return BitCast; |
12334 | } |
12335 | |
12336 | Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { |
12337 | |
12338 | llvm::Type *Int32Ty = Builder.getInt32Ty(); |
12339 | |
12340 | |
12341 | |
12342 | |
12343 | |
12344 | |
12345 | |
12346 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12347 | llvm::ArrayType::get(Int32Ty, 1)); |
12348 | |
12349 | |
12350 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12351 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12352 | |
12353 | |
12354 | |
12355 | unsigned Index; |
12356 | unsigned Value; |
12357 | std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr) |
12358 | #define X86_VENDOR(ENUM, STRING) \ |
12359 | .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12360 | #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \ |
12361 | .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12362 | #define X86_CPU_TYPE(ENUM, STR) \ |
12363 | .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12364 | #define X86_CPU_SUBTYPE(ENUM, STR) \ |
12365 | .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)}) |
12366 | #include "llvm/Support/X86TargetParser.def" |
12367 | .Default({0, 0}); |
12368 | assert(Value != 0 && "Invalid CPUStr passed to CpuIs"); |
12369 | |
12370 | |
12371 | llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), |
12372 | ConstantInt::get(Int32Ty, Index)}; |
12373 | llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); |
12374 | CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue, |
12375 | CharUnits::fromQuantity(4)); |
12376 | |
12377 | |
12378 | return Builder.CreateICmpEQ(CpuValue, |
12379 | llvm::ConstantInt::get(Int32Ty, Value)); |
12380 | } |
12381 | |
12382 | Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { |
12383 | const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); |
12384 | StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); |
12385 | return EmitX86CpuSupports(FeatureStr); |
12386 | } |
12387 | |
12388 | uint64_t |
12389 | CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) { |
12390 | |
12391 | uint64_t FeaturesMask = 0; |
12392 | for (const StringRef &FeatureStr : FeatureStrs) { |
12393 | unsigned Feature = |
12394 | StringSwitch<unsigned>(FeatureStr) |
12395 | #define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM) |
12396 | #include "llvm/Support/X86TargetParser.def" |
12397 | ; |
12398 | FeaturesMask |= (1ULL << Feature); |
12399 | } |
12400 | return FeaturesMask; |
12401 | } |
12402 | |
12403 | Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { |
12404 | return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs)); |
12405 | } |
12406 | |
12407 | llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) { |
12408 | uint32_t Features1 = Lo_32(FeaturesMask); |
12409 | uint32_t Features2 = Hi_32(FeaturesMask); |
12410 | |
12411 | Value *Result = Builder.getTrue(); |
12412 | |
12413 | if (Features1 != 0) { |
12414 | |
12415 | |
12416 | |
12417 | |
12418 | |
12419 | |
12420 | llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, |
12421 | llvm::ArrayType::get(Int32Ty, 1)); |
12422 | |
12423 | |
12424 | llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model"); |
12425 | cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); |
12426 | |
12427 | |
12428 | |
12429 | Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3), |
12430 | Builder.getInt32(0)}; |
12431 | Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs); |
12432 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures, |
12433 | CharUnits::fromQuantity(4)); |
12434 | |
12435 | |
12436 | Value *Mask = Builder.getInt32(Features1); |
12437 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12438 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12439 | Result = Builder.CreateAnd(Result, Cmp); |
12440 | } |
12441 | |
12442 | if (Features2 != 0) { |
12443 | llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty, |
12444 | "__cpu_features2"); |
12445 | cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true); |
12446 | |
12447 | Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2, |
12448 | CharUnits::fromQuantity(4)); |
12449 | |
12450 | |
12451 | Value *Mask = Builder.getInt32(Features2); |
12452 | Value *Bitset = Builder.CreateAnd(Features, Mask); |
12453 | Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); |
12454 | Result = Builder.CreateAnd(Result, Cmp); |
12455 | } |
12456 | |
12457 | return Result; |
12458 | } |
12459 | |
12460 | Value *CodeGenFunction::EmitX86CpuInit() { |
12461 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, |
12462 | false); |
12463 | llvm::FunctionCallee Func = |
12464 | CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init"); |
12465 | cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); |
12466 | cast<llvm::GlobalValue>(Func.getCallee()) |
12467 | ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); |
12468 | return Builder.CreateCall(Func); |
12469 | } |
12470 | |
12471 | Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, |
12472 | const CallExpr *E) { |
12473 | if (BuiltinID == X86::BI__builtin_cpu_is) |
12474 | return EmitX86CpuIs(E); |
12475 | if (BuiltinID == X86::BI__builtin_cpu_supports) |
12476 | return EmitX86CpuSupports(E); |
12477 | if (BuiltinID == X86::BI__builtin_cpu_init) |
12478 | return EmitX86CpuInit(); |
12479 | |
12480 | |
12481 | |
12482 | if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) |
12483 | return EmitMSVCBuiltinExpr(*MsvcIntId, E); |
12484 | |
12485 | SmallVector<Value*, 4> Ops; |
12486 | bool IsMaskFCmp = false; |
12487 | |
12488 | |
12489 | unsigned ICEArguments = 0; |
12490 | ASTContext::GetBuiltinTypeError Error; |
12491 | getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); |
12492 | assert(Error == ASTContext::GE_None && "Should not codegen an error"); |
12493 | |
12494 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { |
12495 | |
12496 | if ((ICEArguments & (1 << i)) == 0) { |
12497 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
12498 | continue; |
12499 | } |
12500 | |
12501 | |
12502 | |
12503 | Ops.push_back(llvm::ConstantInt::get( |
12504 | getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext()))); |
12505 | } |
12506 | |
12507 | |
12508 | |
12509 | |
12510 | |
12511 | |
12512 | |
12513 | auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) { |
12514 | Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm)); |
12515 | llvm::Function *F = CGM.getIntrinsic(ID); |
12516 | return Builder.CreateCall(F, Ops); |
12517 | }; |
12518 | |
12519 | |
12520 | |
12521 | |
12522 | |
12523 | |
12524 | auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred, |
12525 | bool IsSignaling) { |
12526 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
12527 | Value *Cmp; |
12528 | if (IsSignaling) |
12529 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
12530 | else |
12531 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
12532 | llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); |
12533 | llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy); |
12534 | Value *Sext = Builder.CreateSExt(Cmp, IntVecTy); |
12535 | return Builder.CreateBitCast(Sext, FPVecTy); |
12536 | }; |
12537 | |
12538 | switch (BuiltinID) { |
12539 | default: return nullptr; |
12540 | case X86::BI_mm_prefetch: { |
12541 | Value *Address = Ops[0]; |
12542 | ConstantInt *C = cast<ConstantInt>(Ops[1]); |
12543 | Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); |
12544 | Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); |
12545 | Value *Data = ConstantInt::get(Int32Ty, 1); |
12546 | Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); |
12547 | return Builder.CreateCall(F, {Address, RW, Locality, Data}); |
12548 | } |
12549 | case X86::BI_mm_clflush: { |
12550 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush), |
12551 | Ops[0]); |
12552 | } |
12553 | case X86::BI_mm_lfence: { |
12554 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence)); |
12555 | } |
12556 | case X86::BI_mm_mfence: { |
12557 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence)); |
12558 | } |
12559 | case X86::BI_mm_sfence: { |
12560 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence)); |
12561 | } |
12562 | case X86::BI_mm_pause: { |
12563 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause)); |
12564 | } |
12565 | case X86::BI__rdtsc: { |
12566 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc)); |
12567 | } |
12568 | case X86::BI__builtin_ia32_rdtscp: { |
12569 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp)); |
12570 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
12571 | Ops[0]); |
12572 | return Builder.CreateExtractValue(Call, 0); |
12573 | } |
12574 | case X86::BI__builtin_ia32_lzcnt_u16: |
12575 | case X86::BI__builtin_ia32_lzcnt_u32: |
12576 | case X86::BI__builtin_ia32_lzcnt_u64: { |
12577 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
12578 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12579 | } |
12580 | case X86::BI__builtin_ia32_tzcnt_u16: |
12581 | case X86::BI__builtin_ia32_tzcnt_u32: |
12582 | case X86::BI__builtin_ia32_tzcnt_u64: { |
12583 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); |
12584 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
12585 | } |
12586 | case X86::BI__builtin_ia32_undef128: |
12587 | case X86::BI__builtin_ia32_undef256: |
12588 | case X86::BI__builtin_ia32_undef512: |
12589 | |
12590 | |
12591 | |
12592 | |
12593 | |
12594 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
12595 | case X86::BI__builtin_ia32_vec_init_v8qi: |
12596 | case X86::BI__builtin_ia32_vec_init_v4hi: |
12597 | case X86::BI__builtin_ia32_vec_init_v2si: |
12598 | return Builder.CreateBitCast(BuildVector(Ops), |
12599 | llvm::Type::getX86_MMXTy(getLLVMContext())); |
12600 | case X86::BI__builtin_ia32_vec_ext_v2si: |
12601 | case X86::BI__builtin_ia32_vec_ext_v16qi: |
12602 | case X86::BI__builtin_ia32_vec_ext_v8hi: |
12603 | case X86::BI__builtin_ia32_vec_ext_v4si: |
12604 | case X86::BI__builtin_ia32_vec_ext_v4sf: |
12605 | case X86::BI__builtin_ia32_vec_ext_v2di: |
12606 | case X86::BI__builtin_ia32_vec_ext_v32qi: |
12607 | case X86::BI__builtin_ia32_vec_ext_v16hi: |
12608 | case X86::BI__builtin_ia32_vec_ext_v8si: |
12609 | case X86::BI__builtin_ia32_vec_ext_v4di: { |
12610 | unsigned NumElts = |
12611 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12612 | uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
12613 | Index &= NumElts - 1; |
12614 | |
12615 | |
12616 | return Builder.CreateExtractElement(Ops[0], Index); |
12617 | } |
12618 | case X86::BI__builtin_ia32_vec_set_v16qi: |
12619 | case X86::BI__builtin_ia32_vec_set_v8hi: |
12620 | case X86::BI__builtin_ia32_vec_set_v4si: |
12621 | case X86::BI__builtin_ia32_vec_set_v2di: |
12622 | case X86::BI__builtin_ia32_vec_set_v32qi: |
12623 | case X86::BI__builtin_ia32_vec_set_v16hi: |
12624 | case X86::BI__builtin_ia32_vec_set_v8si: |
12625 | case X86::BI__builtin_ia32_vec_set_v4di: { |
12626 | unsigned NumElts = |
12627 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
12628 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
12629 | Index &= NumElts - 1; |
12630 | |
12631 | |
12632 | return Builder.CreateInsertElement(Ops[0], Ops[1], Index); |
12633 | } |
12634 | case X86::BI_mm_setcsr: |
12635 | case X86::BI__builtin_ia32_ldmxcsr: { |
12636 | Address Tmp = CreateMemTemp(E->getArg(0)->getType()); |
12637 | Builder.CreateStore(Ops[0], Tmp); |
12638 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), |
12639 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12640 | } |
12641 | case X86::BI_mm_getcsr: |
12642 | case X86::BI__builtin_ia32_stmxcsr: { |
12643 | Address Tmp = CreateMemTemp(E->getType()); |
12644 | Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), |
12645 | Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); |
12646 | return Builder.CreateLoad(Tmp, "stmxcsr"); |
12647 | } |
12648 | case X86::BI__builtin_ia32_xsave: |
12649 | case X86::BI__builtin_ia32_xsave64: |
12650 | case X86::BI__builtin_ia32_xrstor: |
12651 | case X86::BI__builtin_ia32_xrstor64: |
12652 | case X86::BI__builtin_ia32_xsaveopt: |
12653 | case X86::BI__builtin_ia32_xsaveopt64: |
12654 | case X86::BI__builtin_ia32_xrstors: |
12655 | case X86::BI__builtin_ia32_xrstors64: |
12656 | case X86::BI__builtin_ia32_xsavec: |
12657 | case X86::BI__builtin_ia32_xsavec64: |
12658 | case X86::BI__builtin_ia32_xsaves: |
12659 | case X86::BI__builtin_ia32_xsaves64: |
12660 | case X86::BI__builtin_ia32_xsetbv: |
12661 | case X86::BI_xsetbv: { |
12662 | Intrinsic::ID ID; |
12663 | #define INTRINSIC_X86_XSAVE_ID(NAME) \ |
12664 | case X86::BI__builtin_ia32_##NAME: \ |
12665 | ID = Intrinsic::x86_##NAME; \ |
12666 | break |
12667 | switch (BuiltinID) { |
12668 | default: llvm_unreachable("Unsupported intrinsic!"); |
12669 | INTRINSIC_X86_XSAVE_ID(xsave); |
12670 | INTRINSIC_X86_XSAVE_ID(xsave64); |
12671 | INTRINSIC_X86_XSAVE_ID(xrstor); |
12672 | INTRINSIC_X86_XSAVE_ID(xrstor64); |
12673 | INTRINSIC_X86_XSAVE_ID(xsaveopt); |
12674 | INTRINSIC_X86_XSAVE_ID(xsaveopt64); |
12675 | INTRINSIC_X86_XSAVE_ID(xrstors); |
12676 | INTRINSIC_X86_XSAVE_ID(xrstors64); |
12677 | INTRINSIC_X86_XSAVE_ID(xsavec); |
12678 | INTRINSIC_X86_XSAVE_ID(xsavec64); |
12679 | INTRINSIC_X86_XSAVE_ID(xsaves); |
12680 | INTRINSIC_X86_XSAVE_ID(xsaves64); |
12681 | INTRINSIC_X86_XSAVE_ID(xsetbv); |
12682 | case X86::BI_xsetbv: |
12683 | ID = Intrinsic::x86_xsetbv; |
12684 | break; |
12685 | } |
12686 | #undef INTRINSIC_X86_XSAVE_ID |
12687 | Value *Mhi = Builder.CreateTrunc( |
12688 | Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); |
12689 | Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); |
12690 | Ops[1] = Mhi; |
12691 | Ops.push_back(Mlo); |
12692 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
12693 | } |
12694 | case X86::BI__builtin_ia32_xgetbv: |
12695 | case X86::BI_xgetbv: |
12696 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops); |
12697 | case X86::BI__builtin_ia32_storedqudi128_mask: |
12698 | case X86::BI__builtin_ia32_storedqusi128_mask: |
12699 | case X86::BI__builtin_ia32_storedquhi128_mask: |
12700 | case X86::BI__builtin_ia32_storedquqi128_mask: |
12701 | case X86::BI__builtin_ia32_storeupd128_mask: |
12702 | case X86::BI__builtin_ia32_storeups128_mask: |
12703 | case X86::BI__builtin_ia32_storedqudi256_mask: |
12704 | case X86::BI__builtin_ia32_storedqusi256_mask: |
12705 | case X86::BI__builtin_ia32_storedquhi256_mask: |
12706 | case X86::BI__builtin_ia32_storedquqi256_mask: |
12707 | case X86::BI__builtin_ia32_storeupd256_mask: |
12708 | case X86::BI__builtin_ia32_storeups256_mask: |
12709 | case X86::BI__builtin_ia32_storedqudi512_mask: |
12710 | case X86::BI__builtin_ia32_storedqusi512_mask: |
12711 | case X86::BI__builtin_ia32_storedquhi512_mask: |
12712 | case X86::BI__builtin_ia32_storedquqi512_mask: |
12713 | case X86::BI__builtin_ia32_storeupd512_mask: |
12714 | case X86::BI__builtin_ia32_storeups512_mask: |
12715 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
12716 | |
12717 | case X86::BI__builtin_ia32_storess128_mask: |
12718 | case X86::BI__builtin_ia32_storesd128_mask: |
12719 | return EmitX86MaskedStore(*this, Ops, Align(1)); |
12720 | |
12721 | case X86::BI__builtin_ia32_vpopcntb_128: |
12722 | case X86::BI__builtin_ia32_vpopcntd_128: |
12723 | case X86::BI__builtin_ia32_vpopcntq_128: |
12724 | case X86::BI__builtin_ia32_vpopcntw_128: |
12725 | case X86::BI__builtin_ia32_vpopcntb_256: |
12726 | case X86::BI__builtin_ia32_vpopcntd_256: |
12727 | case X86::BI__builtin_ia32_vpopcntq_256: |
12728 | case X86::BI__builtin_ia32_vpopcntw_256: |
12729 | case X86::BI__builtin_ia32_vpopcntb_512: |
12730 | case X86::BI__builtin_ia32_vpopcntd_512: |
12731 | case X86::BI__builtin_ia32_vpopcntq_512: |
12732 | case X86::BI__builtin_ia32_vpopcntw_512: { |
12733 | llvm::Type *ResultType = ConvertType(E->getType()); |
12734 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
12735 | return Builder.CreateCall(F, Ops); |
12736 | } |
12737 | case X86::BI__builtin_ia32_cvtmask2b128: |
12738 | case X86::BI__builtin_ia32_cvtmask2b256: |
12739 | case X86::BI__builtin_ia32_cvtmask2b512: |
12740 | case X86::BI__builtin_ia32_cvtmask2w128: |
12741 | case X86::BI__builtin_ia32_cvtmask2w256: |
12742 | case X86::BI__builtin_ia32_cvtmask2w512: |
12743 | case X86::BI__builtin_ia32_cvtmask2d128: |
12744 | case X86::BI__builtin_ia32_cvtmask2d256: |
12745 | case X86::BI__builtin_ia32_cvtmask2d512: |
12746 | case X86::BI__builtin_ia32_cvtmask2q128: |
12747 | case X86::BI__builtin_ia32_cvtmask2q256: |
12748 | case X86::BI__builtin_ia32_cvtmask2q512: |
12749 | return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); |
12750 | |
12751 | case X86::BI__builtin_ia32_cvtb2mask128: |
12752 | case X86::BI__builtin_ia32_cvtb2mask256: |
12753 | case X86::BI__builtin_ia32_cvtb2mask512: |
12754 | case X86::BI__builtin_ia32_cvtw2mask128: |
12755 | case X86::BI__builtin_ia32_cvtw2mask256: |
12756 | case X86::BI__builtin_ia32_cvtw2mask512: |
12757 | case X86::BI__builtin_ia32_cvtd2mask128: |
12758 | case X86::BI__builtin_ia32_cvtd2mask256: |
12759 | case X86::BI__builtin_ia32_cvtd2mask512: |
12760 | case X86::BI__builtin_ia32_cvtq2mask128: |
12761 | case X86::BI__builtin_ia32_cvtq2mask256: |
12762 | case X86::BI__builtin_ia32_cvtq2mask512: |
12763 | return EmitX86ConvertToMask(*this, Ops[0]); |
12764 | |
12765 | case X86::BI__builtin_ia32_cvtdq2ps512_mask: |
12766 | case X86::BI__builtin_ia32_cvtqq2ps512_mask: |
12767 | case X86::BI__builtin_ia32_cvtqq2pd512_mask: |
12768 | return EmitX86ConvertIntToFp(*this, E, Ops, true); |
12769 | case X86::BI__builtin_ia32_cvtudq2ps512_mask: |
12770 | case X86::BI__builtin_ia32_cvtuqq2ps512_mask: |
12771 | case X86::BI__builtin_ia32_cvtuqq2pd512_mask: |
12772 | return EmitX86ConvertIntToFp(*this, E, Ops, false); |
12773 | |
12774 | case X86::BI__builtin_ia32_vfmaddss3: |
12775 | case X86::BI__builtin_ia32_vfmaddsd3: |
12776 | case X86::BI__builtin_ia32_vfmaddss3_mask: |
12777 | case X86::BI__builtin_ia32_vfmaddsd3_mask: |
12778 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0]); |
12779 | case X86::BI__builtin_ia32_vfmaddss: |
12780 | case X86::BI__builtin_ia32_vfmaddsd: |
12781 | return EmitScalarFMAExpr(*this, E, Ops, |
12782 | Constant::getNullValue(Ops[0]->getType())); |
12783 | case X86::BI__builtin_ia32_vfmaddss3_maskz: |
12784 | case X86::BI__builtin_ia32_vfmaddsd3_maskz: |
12785 | return EmitScalarFMAExpr(*this, E, Ops, Ops[0], true); |
12786 | case X86::BI__builtin_ia32_vfmaddss3_mask3: |
12787 | case X86::BI__builtin_ia32_vfmaddsd3_mask3: |
12788 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], false, 2); |
12789 | case X86::BI__builtin_ia32_vfmsubss3_mask3: |
12790 | case X86::BI__builtin_ia32_vfmsubsd3_mask3: |
12791 | return EmitScalarFMAExpr(*this, E, Ops, Ops[2], false, 2, |
12792 | true); |
12793 | case X86::BI__builtin_ia32_vfmaddps: |
12794 | case X86::BI__builtin_ia32_vfmaddpd: |
12795 | case X86::BI__builtin_ia32_vfmaddps256: |
12796 | case X86::BI__builtin_ia32_vfmaddpd256: |
12797 | case X86::BI__builtin_ia32_vfmaddps512_mask: |
12798 | case X86::BI__builtin_ia32_vfmaddps512_maskz: |
12799 | case X86::BI__builtin_ia32_vfmaddps512_mask3: |
12800 | case X86::BI__builtin_ia32_vfmsubps512_mask3: |
12801 | case X86::BI__builtin_ia32_vfmaddpd512_mask: |
12802 | case X86::BI__builtin_ia32_vfmaddpd512_maskz: |
12803 | case X86::BI__builtin_ia32_vfmaddpd512_mask3: |
12804 | case X86::BI__builtin_ia32_vfmsubpd512_mask3: |
12805 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, false); |
12806 | case X86::BI__builtin_ia32_vfmaddsubps512_mask: |
12807 | case X86::BI__builtin_ia32_vfmaddsubps512_maskz: |
12808 | case X86::BI__builtin_ia32_vfmaddsubps512_mask3: |
12809 | case X86::BI__builtin_ia32_vfmsubaddps512_mask3: |
12810 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask: |
12811 | case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: |
12812 | case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: |
12813 | case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: |
12814 | return EmitX86FMAExpr(*this, E, Ops, BuiltinID, true); |
12815 | |
12816 | case X86::BI__builtin_ia32_movdqa32store128_mask: |
12817 | case X86::BI__builtin_ia32_movdqa64store128_mask: |
12818 | case X86::BI__builtin_ia32_storeaps128_mask: |
12819 | case X86::BI__builtin_ia32_storeapd128_mask: |
12820 | case X86::BI__builtin_ia32_movdqa32store256_mask: |
12821 | case X86::BI__builtin_ia32_movdqa64store256_mask: |
12822 | case X86::BI__builtin_ia32_storeaps256_mask: |
12823 | case X86::BI__builtin_ia32_storeapd256_mask: |
12824 | case X86::BI__builtin_ia32_movdqa32store512_mask: |
12825 | case X86::BI__builtin_ia32_movdqa64store512_mask: |
12826 | case X86::BI__builtin_ia32_storeaps512_mask: |
12827 | case X86::BI__builtin_ia32_storeapd512_mask: |
12828 | return EmitX86MaskedStore( |
12829 | *this, Ops, |
12830 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
12831 | |
12832 | case X86::BI__builtin_ia32_loadups128_mask: |
12833 | case X86::BI__builtin_ia32_loadups256_mask: |
12834 | case X86::BI__builtin_ia32_loadups512_mask: |
12835 | case X86::BI__builtin_ia32_loadupd128_mask: |
12836 | case X86::BI__builtin_ia32_loadupd256_mask: |
12837 | case X86::BI__builtin_ia32_loadupd512_mask: |
12838 | case X86::BI__builtin_ia32_loaddquqi128_mask: |
12839 | case X86::BI__builtin_ia32_loaddquqi256_mask: |
12840 | case X86::BI__builtin_ia32_loaddquqi512_mask: |
12841 | case X86::BI__builtin_ia32_loaddquhi128_mask: |
12842 | case X86::BI__builtin_ia32_loaddquhi256_mask: |
12843 | case X86::BI__builtin_ia32_loaddquhi512_mask: |
12844 | case X86::BI__builtin_ia32_loaddqusi128_mask: |
12845 | case X86::BI__builtin_ia32_loaddqusi256_mask: |
12846 | case X86::BI__builtin_ia32_loaddqusi512_mask: |
12847 | case X86::BI__builtin_ia32_loaddqudi128_mask: |
12848 | case X86::BI__builtin_ia32_loaddqudi256_mask: |
12849 | case X86::BI__builtin_ia32_loaddqudi512_mask: |
12850 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
12851 | |
12852 | case X86::BI__builtin_ia32_loadss128_mask: |
12853 | case X86::BI__builtin_ia32_loadsd128_mask: |
12854 | return EmitX86MaskedLoad(*this, Ops, Align(1)); |
12855 | |
12856 | case X86::BI__builtin_ia32_loadaps128_mask: |
12857 | case X86::BI__builtin_ia32_loadaps256_mask: |
12858 | case X86::BI__builtin_ia32_loadaps512_mask: |
12859 | case X86::BI__builtin_ia32_loadapd128_mask: |
12860 | case X86::BI__builtin_ia32_loadapd256_mask: |
12861 | case X86::BI__builtin_ia32_loadapd512_mask: |
12862 | case X86::BI__builtin_ia32_movdqa32load128_mask: |
12863 | case X86::BI__builtin_ia32_movdqa32load256_mask: |
12864 | case X86::BI__builtin_ia32_movdqa32load512_mask: |
12865 | case X86::BI__builtin_ia32_movdqa64load128_mask: |
12866 | case X86::BI__builtin_ia32_movdqa64load256_mask: |
12867 | case X86::BI__builtin_ia32_movdqa64load512_mask: |
12868 | return EmitX86MaskedLoad( |
12869 | *this, Ops, |
12870 | getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); |
12871 | |
12872 | case X86::BI__builtin_ia32_expandloaddf128_mask: |
12873 | case X86::BI__builtin_ia32_expandloaddf256_mask: |
12874 | case X86::BI__builtin_ia32_expandloaddf512_mask: |
12875 | case X86::BI__builtin_ia32_expandloadsf128_mask: |
12876 | case X86::BI__builtin_ia32_expandloadsf256_mask: |
12877 | case X86::BI__builtin_ia32_expandloadsf512_mask: |
12878 | case X86::BI__builtin_ia32_expandloaddi128_mask: |
12879 | case X86::BI__builtin_ia32_expandloaddi256_mask: |
12880 | case X86::BI__builtin_ia32_expandloaddi512_mask: |
12881 | case X86::BI__builtin_ia32_expandloadsi128_mask: |
12882 | case X86::BI__builtin_ia32_expandloadsi256_mask: |
12883 | case X86::BI__builtin_ia32_expandloadsi512_mask: |
12884 | case X86::BI__builtin_ia32_expandloadhi128_mask: |
12885 | case X86::BI__builtin_ia32_expandloadhi256_mask: |
12886 | case X86::BI__builtin_ia32_expandloadhi512_mask: |
12887 | case X86::BI__builtin_ia32_expandloadqi128_mask: |
12888 | case X86::BI__builtin_ia32_expandloadqi256_mask: |
12889 | case X86::BI__builtin_ia32_expandloadqi512_mask: |
12890 | return EmitX86ExpandLoad(*this, Ops); |
12891 | |
12892 | case X86::BI__builtin_ia32_compressstoredf128_mask: |
12893 | case X86::BI__builtin_ia32_compressstoredf256_mask: |
12894 | case X86::BI__builtin_ia32_compressstoredf512_mask: |
12895 | case X86::BI__builtin_ia32_compressstoresf128_mask: |
12896 | case X86::BI__builtin_ia32_compressstoresf256_mask: |
12897 | case X86::BI__builtin_ia32_compressstoresf512_mask: |
12898 | case X86::BI__builtin_ia32_compressstoredi128_mask: |
12899 | case X86::BI__builtin_ia32_compressstoredi256_mask: |
12900 | case X86::BI__builtin_ia32_compressstoredi512_mask: |
12901 | case X86::BI__builtin_ia32_compressstoresi128_mask: |
12902 | case X86::BI__builtin_ia32_compressstoresi256_mask: |
12903 | case X86::BI__builtin_ia32_compressstoresi512_mask: |
12904 | case X86::BI__builtin_ia32_compressstorehi128_mask: |
12905 | case X86::BI__builtin_ia32_compressstorehi256_mask: |
12906 | case X86::BI__builtin_ia32_compressstorehi512_mask: |
12907 | case X86::BI__builtin_ia32_compressstoreqi128_mask: |
12908 | case X86::BI__builtin_ia32_compressstoreqi256_mask: |
12909 | case X86::BI__builtin_ia32_compressstoreqi512_mask: |
12910 | return EmitX86CompressStore(*this, Ops); |
12911 | |
12912 | case X86::BI__builtin_ia32_expanddf128_mask: |
12913 | case X86::BI__builtin_ia32_expanddf256_mask: |
12914 | case X86::BI__builtin_ia32_expanddf512_mask: |
12915 | case X86::BI__builtin_ia32_expandsf128_mask: |
12916 | case X86::BI__builtin_ia32_expandsf256_mask: |
12917 | case X86::BI__builtin_ia32_expandsf512_mask: |
12918 | case X86::BI__builtin_ia32_expanddi128_mask: |
12919 | case X86::BI__builtin_ia32_expanddi256_mask: |
12920 | case X86::BI__builtin_ia32_expanddi512_mask: |
12921 | case X86::BI__builtin_ia32_expandsi128_mask: |
12922 | case X86::BI__builtin_ia32_expandsi256_mask: |
12923 | case X86::BI__builtin_ia32_expandsi512_mask: |
12924 | case X86::BI__builtin_ia32_expandhi128_mask: |
12925 | case X86::BI__builtin_ia32_expandhi256_mask: |
12926 | case X86::BI__builtin_ia32_expandhi512_mask: |
12927 | case X86::BI__builtin_ia32_expandqi128_mask: |
12928 | case X86::BI__builtin_ia32_expandqi256_mask: |
12929 | case X86::BI__builtin_ia32_expandqi512_mask: |
12930 | return EmitX86CompressExpand(*this, Ops, false); |
12931 | |
12932 | case X86::BI__builtin_ia32_compressdf128_mask: |
12933 | case X86::BI__builtin_ia32_compressdf256_mask: |
12934 | case X86::BI__builtin_ia32_compressdf512_mask: |
12935 | case X86::BI__builtin_ia32_compresssf128_mask: |
12936 | case X86::BI__builtin_ia32_compresssf256_mask: |
12937 | case X86::BI__builtin_ia32_compresssf512_mask: |
12938 | case X86::BI__builtin_ia32_compressdi128_mask: |
12939 | case X86::BI__builtin_ia32_compressdi256_mask: |
12940 | case X86::BI__builtin_ia32_compressdi512_mask: |
12941 | case X86::BI__builtin_ia32_compresssi128_mask: |
12942 | case X86::BI__builtin_ia32_compresssi256_mask: |
12943 | case X86::BI__builtin_ia32_compresssi512_mask: |
12944 | case X86::BI__builtin_ia32_compresshi128_mask: |
12945 | case X86::BI__builtin_ia32_compresshi256_mask: |
12946 | case X86::BI__builtin_ia32_compresshi512_mask: |
12947 | case X86::BI__builtin_ia32_compressqi128_mask: |
12948 | case X86::BI__builtin_ia32_compressqi256_mask: |
12949 | case X86::BI__builtin_ia32_compressqi512_mask: |
12950 | return EmitX86CompressExpand(*this, Ops, true); |
12951 | |
12952 | case X86::BI__builtin_ia32_gather3div2df: |
12953 | case X86::BI__builtin_ia32_gather3div2di: |
12954 | case X86::BI__builtin_ia32_gather3div4df: |
12955 | case X86::BI__builtin_ia32_gather3div4di: |
12956 | case X86::BI__builtin_ia32_gather3div4sf: |
12957 | case X86::BI__builtin_ia32_gather3div4si: |
12958 | case X86::BI__builtin_ia32_gather3div8sf: |
12959 | case X86::BI__builtin_ia32_gather3div8si: |
12960 | case X86::BI__builtin_ia32_gather3siv2df: |
12961 | case X86::BI__builtin_ia32_gather3siv2di: |
12962 | case X86::BI__builtin_ia32_gather3siv4df: |
12963 | case X86::BI__builtin_ia32_gather3siv4di: |
12964 | case X86::BI__builtin_ia32_gather3siv4sf: |
12965 | case X86::BI__builtin_ia32_gather3siv4si: |
12966 | case X86::BI__builtin_ia32_gather3siv8sf: |
12967 | case X86::BI__builtin_ia32_gather3siv8si: |
12968 | case X86::BI__builtin_ia32_gathersiv8df: |
12969 | case X86::BI__builtin_ia32_gathersiv16sf: |
12970 | case X86::BI__builtin_ia32_gatherdiv8df: |
12971 | case X86::BI__builtin_ia32_gatherdiv16sf: |
12972 | case X86::BI__builtin_ia32_gathersiv8di: |
12973 | case X86::BI__builtin_ia32_gathersiv16si: |
12974 | case X86::BI__builtin_ia32_gatherdiv8di: |
12975 | case X86::BI__builtin_ia32_gatherdiv16si: { |
12976 | Intrinsic::ID IID; |
12977 | switch (BuiltinID) { |
12978 | default: llvm_unreachable("Unexpected builtin"); |
12979 | case X86::BI__builtin_ia32_gather3div2df: |
12980 | IID = Intrinsic::x86_avx512_mask_gather3div2_df; |
12981 | break; |
12982 | case X86::BI__builtin_ia32_gather3div2di: |
12983 | IID = Intrinsic::x86_avx512_mask_gather3div2_di; |
12984 | break; |
12985 | case X86::BI__builtin_ia32_gather3div4df: |
12986 | IID = Intrinsic::x86_avx512_mask_gather3div4_df; |
12987 | break; |
12988 | case X86::BI__builtin_ia32_gather3div4di: |
12989 | IID = Intrinsic::x86_avx512_mask_gather3div4_di; |
12990 | break; |
12991 | case X86::BI__builtin_ia32_gather3div4sf: |
12992 | IID = Intrinsic::x86_avx512_mask_gather3div4_sf; |
12993 | break; |
12994 | case X86::BI__builtin_ia32_gather3div4si: |
12995 | IID = Intrinsic::x86_avx512_mask_gather3div4_si; |
12996 | break; |
12997 | case X86::BI__builtin_ia32_gather3div8sf: |
12998 | IID = Intrinsic::x86_avx512_mask_gather3div8_sf; |
12999 | break; |
13000 | case X86::BI__builtin_ia32_gather3div8si: |
13001 | IID = Intrinsic::x86_avx512_mask_gather3div8_si; |
13002 | break; |
13003 | case X86::BI__builtin_ia32_gather3siv2df: |
13004 | IID = Intrinsic::x86_avx512_mask_gather3siv2_df; |
13005 | break; |
13006 | case X86::BI__builtin_ia32_gather3siv2di: |
13007 | IID = Intrinsic::x86_avx512_mask_gather3siv2_di; |
13008 | break; |
13009 | case X86::BI__builtin_ia32_gather3siv4df: |
13010 | IID = Intrinsic::x86_avx512_mask_gather3siv4_df; |
13011 | break; |
13012 | case X86::BI__builtin_ia32_gather3siv4di: |
13013 | IID = Intrinsic::x86_avx512_mask_gather3siv4_di; |
13014 | break; |
13015 | case X86::BI__builtin_ia32_gather3siv4sf: |
13016 | IID = Intrinsic::x86_avx512_mask_gather3siv4_sf; |
13017 | break; |
13018 | case X86::BI__builtin_ia32_gather3siv4si: |
13019 | IID = Intrinsic::x86_avx512_mask_gather3siv4_si; |
13020 | break; |
13021 | case X86::BI__builtin_ia32_gather3siv8sf: |
13022 | IID = Intrinsic::x86_avx512_mask_gather3siv8_sf; |
13023 | break; |
13024 | case X86::BI__builtin_ia32_gather3siv8si: |
13025 | IID = Intrinsic::x86_avx512_mask_gather3siv8_si; |
13026 | break; |
13027 | case X86::BI__builtin_ia32_gathersiv8df: |
13028 | IID = Intrinsic::x86_avx512_mask_gather_dpd_512; |
13029 | break; |
13030 | case X86::BI__builtin_ia32_gathersiv16sf: |
13031 | IID = Intrinsic::x86_avx512_mask_gather_dps_512; |
13032 | break; |
13033 | case X86::BI__builtin_ia32_gatherdiv8df: |
13034 | IID = Intrinsic::x86_avx512_mask_gather_qpd_512; |
13035 | break; |
13036 | case X86::BI__builtin_ia32_gatherdiv16sf: |
13037 | IID = Intrinsic::x86_avx512_mask_gather_qps_512; |
13038 | break; |
13039 | case X86::BI__builtin_ia32_gathersiv8di: |
13040 | IID = Intrinsic::x86_avx512_mask_gather_dpq_512; |
13041 | break; |
13042 | case X86::BI__builtin_ia32_gathersiv16si: |
13043 | IID = Intrinsic::x86_avx512_mask_gather_dpi_512; |
13044 | break; |
13045 | case X86::BI__builtin_ia32_gatherdiv8di: |
13046 | IID = Intrinsic::x86_avx512_mask_gather_qpq_512; |
13047 | break; |
13048 | case X86::BI__builtin_ia32_gatherdiv16si: |
13049 | IID = Intrinsic::x86_avx512_mask_gather_qpi_512; |
13050 | break; |
13051 | } |
13052 | |
13053 | unsigned MinElts = std::min( |
13054 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(), |
13055 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements()); |
13056 | Ops[3] = getMaskVecValue(*this, Ops[3], MinElts); |
13057 | Function *Intr = CGM.getIntrinsic(IID); |
13058 | return Builder.CreateCall(Intr, Ops); |
13059 | } |
13060 | |
13061 | case X86::BI__builtin_ia32_scattersiv8df: |
13062 | case X86::BI__builtin_ia32_scattersiv16sf: |
13063 | case X86::BI__builtin_ia32_scatterdiv8df: |
13064 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13065 | case X86::BI__builtin_ia32_scattersiv8di: |
13066 | case X86::BI__builtin_ia32_scattersiv16si: |
13067 | case X86::BI__builtin_ia32_scatterdiv8di: |
13068 | case X86::BI__builtin_ia32_scatterdiv16si: |
13069 | case X86::BI__builtin_ia32_scatterdiv2df: |
13070 | case X86::BI__builtin_ia32_scatterdiv2di: |
13071 | case X86::BI__builtin_ia32_scatterdiv4df: |
13072 | case X86::BI__builtin_ia32_scatterdiv4di: |
13073 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13074 | case X86::BI__builtin_ia32_scatterdiv4si: |
13075 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13076 | case X86::BI__builtin_ia32_scatterdiv8si: |
13077 | case X86::BI__builtin_ia32_scattersiv2df: |
13078 | case X86::BI__builtin_ia32_scattersiv2di: |
13079 | case X86::BI__builtin_ia32_scattersiv4df: |
13080 | case X86::BI__builtin_ia32_scattersiv4di: |
13081 | case X86::BI__builtin_ia32_scattersiv4sf: |
13082 | case X86::BI__builtin_ia32_scattersiv4si: |
13083 | case X86::BI__builtin_ia32_scattersiv8sf: |
13084 | case X86::BI__builtin_ia32_scattersiv8si: { |
13085 | Intrinsic::ID IID; |
13086 | switch (BuiltinID) { |
13087 | default: llvm_unreachable("Unexpected builtin"); |
13088 | case X86::BI__builtin_ia32_scattersiv8df: |
13089 | IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; |
13090 | break; |
13091 | case X86::BI__builtin_ia32_scattersiv16sf: |
13092 | IID = Intrinsic::x86_avx512_mask_scatter_dps_512; |
13093 | break; |
13094 | case X86::BI__builtin_ia32_scatterdiv8df: |
13095 | IID = Intrinsic::x86_avx512_mask_scatter_qpd_512; |
13096 | break; |
13097 | case X86::BI__builtin_ia32_scatterdiv16sf: |
13098 | IID = Intrinsic::x86_avx512_mask_scatter_qps_512; |
13099 | break; |
13100 | case X86::BI__builtin_ia32_scattersiv8di: |
13101 | IID = Intrinsic::x86_avx512_mask_scatter_dpq_512; |
13102 | break; |
13103 | case X86::BI__builtin_ia32_scattersiv16si: |
13104 | IID = Intrinsic::x86_avx512_mask_scatter_dpi_512; |
13105 | break; |
13106 | case X86::BI__builtin_ia32_scatterdiv8di: |
13107 | IID = Intrinsic::x86_avx512_mask_scatter_qpq_512; |
13108 | break; |
13109 | case X86::BI__builtin_ia32_scatterdiv16si: |
13110 | IID = Intrinsic::x86_avx512_mask_scatter_qpi_512; |
13111 | break; |
13112 | case X86::BI__builtin_ia32_scatterdiv2df: |
13113 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_df; |
13114 | break; |
13115 | case X86::BI__builtin_ia32_scatterdiv2di: |
13116 | IID = Intrinsic::x86_avx512_mask_scatterdiv2_di; |
13117 | break; |
13118 | case X86::BI__builtin_ia32_scatterdiv4df: |
13119 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_df; |
13120 | break; |
13121 | case X86::BI__builtin_ia32_scatterdiv4di: |
13122 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_di; |
13123 | break; |
13124 | case X86::BI__builtin_ia32_scatterdiv4sf: |
13125 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf; |
13126 | break; |
13127 | case X86::BI__builtin_ia32_scatterdiv4si: |
13128 | IID = Intrinsic::x86_avx512_mask_scatterdiv4_si; |
13129 | break; |
13130 | case X86::BI__builtin_ia32_scatterdiv8sf: |
13131 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf; |
13132 | break; |
13133 | case X86::BI__builtin_ia32_scatterdiv8si: |
13134 | IID = Intrinsic::x86_avx512_mask_scatterdiv8_si; |
13135 | break; |
13136 | case X86::BI__builtin_ia32_scattersiv2df: |
13137 | IID = Intrinsic::x86_avx512_mask_scattersiv2_df; |
13138 | break; |
13139 | case X86::BI__builtin_ia32_scattersiv2di: |
13140 | IID = Intrinsic::x86_avx512_mask_scattersiv2_di; |
13141 | break; |
13142 | case X86::BI__builtin_ia32_scattersiv4df: |
13143 | IID = Intrinsic::x86_avx512_mask_scattersiv4_df; |
13144 | break; |
13145 | case X86::BI__builtin_ia32_scattersiv4di: |
13146 | IID = Intrinsic::x86_avx512_mask_scattersiv4_di; |
13147 | break; |
13148 | case X86::BI__builtin_ia32_scattersiv4sf: |
13149 | IID = Intrinsic::x86_avx512_mask_scattersiv4_sf; |
13150 | break; |
13151 | case X86::BI__builtin_ia32_scattersiv4si: |
13152 | IID = Intrinsic::x86_avx512_mask_scattersiv4_si; |
13153 | break; |
13154 | case X86::BI__builtin_ia32_scattersiv8sf: |
13155 | IID = Intrinsic::x86_avx512_mask_scattersiv8_sf; |
13156 | break; |
13157 | case X86::BI__builtin_ia32_scattersiv8si: |
13158 | IID = Intrinsic::x86_avx512_mask_scattersiv8_si; |
13159 | break; |
13160 | } |
13161 | |
13162 | unsigned MinElts = std::min( |
13163 | cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(), |
13164 | cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements()); |
13165 | Ops[1] = getMaskVecValue(*this, Ops[1], MinElts); |
13166 | Function *Intr = CGM.getIntrinsic(IID); |
13167 | return Builder.CreateCall(Intr, Ops); |
13168 | } |
13169 | |
13170 | case X86::BI__builtin_ia32_vextractf128_pd256: |
13171 | case X86::BI__builtin_ia32_vextractf128_ps256: |
13172 | case X86::BI__builtin_ia32_vextractf128_si256: |
13173 | case X86::BI__builtin_ia32_extract128i256: |
13174 | case X86::BI__builtin_ia32_extractf64x4_mask: |
13175 | case X86::BI__builtin_ia32_extractf32x4_mask: |
13176 | case X86::BI__builtin_ia32_extracti64x4_mask: |
13177 | case X86::BI__builtin_ia32_extracti32x4_mask: |
13178 | case X86::BI__builtin_ia32_extractf32x8_mask: |
13179 | case X86::BI__builtin_ia32_extracti32x8_mask: |
13180 | case X86::BI__builtin_ia32_extractf32x4_256_mask: |
13181 | case X86::BI__builtin_ia32_extracti32x4_256_mask: |
13182 | case X86::BI__builtin_ia32_extractf64x2_256_mask: |
13183 | case X86::BI__builtin_ia32_extracti64x2_256_mask: |
13184 | case X86::BI__builtin_ia32_extractf64x2_512_mask: |
13185 | case X86::BI__builtin_ia32_extracti64x2_512_mask: { |
13186 | auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType())); |
13187 | unsigned NumElts = DstTy->getNumElements(); |
13188 | unsigned SrcNumElts = |
13189 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13190 | unsigned SubVectors = SrcNumElts / NumElts; |
13191 | unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); |
13192 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); |
13193 | Index &= SubVectors - 1; |
13194 | Index *= NumElts; |
13195 | |
13196 | int Indices[16]; |
13197 | for (unsigned i = 0; i != NumElts; ++i) |
13198 | Indices[i] = i + Index; |
13199 | |
13200 | Value *Res = Builder.CreateShuffleVector(Ops[0], |
13201 | makeArrayRef(Indices, NumElts), |
13202 | "extract"); |
13203 | |
13204 | if (Ops.size() == 4) |
13205 | Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); |
13206 | |
13207 | return Res; |
13208 | } |
13209 | case X86::BI__builtin_ia32_vinsertf128_pd256: |
13210 | case X86::BI__builtin_ia32_vinsertf128_ps256: |
13211 | case X86::BI__builtin_ia32_vinsertf128_si256: |
13212 | case X86::BI__builtin_ia32_insert128i256: |
13213 | case X86::BI__builtin_ia32_insertf64x4: |
13214 | case X86::BI__builtin_ia32_insertf32x4: |
13215 | case X86::BI__builtin_ia32_inserti64x4: |
13216 | case X86::BI__builtin_ia32_inserti32x4: |
13217 | case X86::BI__builtin_ia32_insertf32x8: |
13218 | case X86::BI__builtin_ia32_inserti32x8: |
13219 | case X86::BI__builtin_ia32_insertf32x4_256: |
13220 | case X86::BI__builtin_ia32_inserti32x4_256: |
13221 | case X86::BI__builtin_ia32_insertf64x2_256: |
13222 | case X86::BI__builtin_ia32_inserti64x2_256: |
13223 | case X86::BI__builtin_ia32_insertf64x2_512: |
13224 | case X86::BI__builtin_ia32_inserti64x2_512: { |
13225 | unsigned DstNumElts = |
13226 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13227 | unsigned SrcNumElts = |
13228 | cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements(); |
13229 | unsigned SubVectors = DstNumElts / SrcNumElts; |
13230 | unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); |
13231 | assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); |
13232 | Index &= SubVectors - 1; |
13233 | Index *= SrcNumElts; |
13234 | |
13235 | int Indices[16]; |
13236 | for (unsigned i = 0; i != DstNumElts; ++i) |
13237 | Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; |
13238 | |
13239 | Value *Op1 = Builder.CreateShuffleVector(Ops[1], |
13240 | makeArrayRef(Indices, DstNumElts), |
13241 | "widen"); |
13242 | |
13243 | for (unsigned i = 0; i != DstNumElts; ++i) { |
13244 | if (i >= Index && i < (Index + SrcNumElts)) |
13245 | Indices[i] = (i - Index) + DstNumElts; |
13246 | else |
13247 | Indices[i] = i; |
13248 | } |
13249 | |
13250 | return Builder.CreateShuffleVector(Ops[0], Op1, |
13251 | makeArrayRef(Indices, DstNumElts), |
13252 | "insert"); |
13253 | } |
13254 | case X86::BI__builtin_ia32_pmovqd512_mask: |
13255 | case X86::BI__builtin_ia32_pmovwb512_mask: { |
13256 | Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13257 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
13258 | } |
13259 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13260 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13261 | case X86::BI__builtin_ia32_pmovqw512_mask: { |
13262 | if (const auto *C = dyn_cast<Constant>(Ops[2])) |
13263 | if (C->isAllOnesValue()) |
13264 | return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); |
13265 | |
13266 | Intrinsic::ID IID; |
13267 | switch (BuiltinID) { |
13268 | default: llvm_unreachable("Unsupported intrinsic!"); |
13269 | case X86::BI__builtin_ia32_pmovdb512_mask: |
13270 | IID = Intrinsic::x86_avx512_mask_pmov_db_512; |
13271 | break; |
13272 | case X86::BI__builtin_ia32_pmovdw512_mask: |
13273 | IID = Intrinsic::x86_avx512_mask_pmov_dw_512; |
13274 | break; |
13275 | case X86::BI__builtin_ia32_pmovqw512_mask: |
13276 | IID = Intrinsic::x86_avx512_mask_pmov_qw_512; |
13277 | break; |
13278 | } |
13279 | |
13280 | Function *Intr = CGM.getIntrinsic(IID); |
13281 | return Builder.CreateCall(Intr, Ops); |
13282 | } |
13283 | case X86::BI__builtin_ia32_pblendw128: |
13284 | case X86::BI__builtin_ia32_blendpd: |
13285 | case X86::BI__builtin_ia32_blendps: |
13286 | case X86::BI__builtin_ia32_blendpd256: |
13287 | case X86::BI__builtin_ia32_blendps256: |
13288 | case X86::BI__builtin_ia32_pblendw256: |
13289 | case X86::BI__builtin_ia32_pblendd128: |
13290 | case X86::BI__builtin_ia32_pblendd256: { |
13291 | unsigned NumElts = |
13292 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13293 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13294 | |
13295 | int Indices[16]; |
13296 | |
13297 | |
13298 | for (unsigned i = 0; i != NumElts; ++i) |
13299 | Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i; |
13300 | |
13301 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13302 | makeArrayRef(Indices, NumElts), |
13303 | "blend"); |
13304 | } |
13305 | case X86::BI__builtin_ia32_pshuflw: |
13306 | case X86::BI__builtin_ia32_pshuflw256: |
13307 | case X86::BI__builtin_ia32_pshuflw512: { |
13308 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13309 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13310 | unsigned NumElts = Ty->getNumElements(); |
13311 | |
13312 | |
13313 | Imm = (Imm & 0xff) * 0x01010101; |
13314 | |
13315 | int Indices[32]; |
13316 | for (unsigned l = 0; l != NumElts; l += 8) { |
13317 | for (unsigned i = 0; i != 4; ++i) { |
13318 | Indices[l + i] = l + (Imm & 3); |
13319 | Imm >>= 2; |
13320 | } |
13321 | for (unsigned i = 4; i != 8; ++i) |
13322 | Indices[l + i] = l + i; |
13323 | } |
13324 | |
13325 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13326 | "pshuflw"); |
13327 | } |
13328 | case X86::BI__builtin_ia32_pshufhw: |
13329 | case X86::BI__builtin_ia32_pshufhw256: |
13330 | case X86::BI__builtin_ia32_pshufhw512: { |
13331 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13332 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13333 | unsigned NumElts = Ty->getNumElements(); |
13334 | |
13335 | |
13336 | Imm = (Imm & 0xff) * 0x01010101; |
13337 | |
13338 | int Indices[32]; |
13339 | for (unsigned l = 0; l != NumElts; l += 8) { |
13340 | for (unsigned i = 0; i != 4; ++i) |
13341 | Indices[l + i] = l + i; |
13342 | for (unsigned i = 4; i != 8; ++i) { |
13343 | Indices[l + i] = l + 4 + (Imm & 3); |
13344 | Imm >>= 2; |
13345 | } |
13346 | } |
13347 | |
13348 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13349 | "pshufhw"); |
13350 | } |
13351 | case X86::BI__builtin_ia32_pshufd: |
13352 | case X86::BI__builtin_ia32_pshufd256: |
13353 | case X86::BI__builtin_ia32_pshufd512: |
13354 | case X86::BI__builtin_ia32_vpermilpd: |
13355 | case X86::BI__builtin_ia32_vpermilps: |
13356 | case X86::BI__builtin_ia32_vpermilpd256: |
13357 | case X86::BI__builtin_ia32_vpermilps256: |
13358 | case X86::BI__builtin_ia32_vpermilpd512: |
13359 | case X86::BI__builtin_ia32_vpermilps512: { |
13360 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13361 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13362 | unsigned NumElts = Ty->getNumElements(); |
13363 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13364 | unsigned NumLaneElts = NumElts / NumLanes; |
13365 | |
13366 | |
13367 | Imm = (Imm & 0xff) * 0x01010101; |
13368 | |
13369 | int Indices[16]; |
13370 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13371 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13372 | Indices[i + l] = (Imm % NumLaneElts) + l; |
13373 | Imm /= NumLaneElts; |
13374 | } |
13375 | } |
13376 | |
13377 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13378 | "permil"); |
13379 | } |
13380 | case X86::BI__builtin_ia32_shufpd: |
13381 | case X86::BI__builtin_ia32_shufpd256: |
13382 | case X86::BI__builtin_ia32_shufpd512: |
13383 | case X86::BI__builtin_ia32_shufps: |
13384 | case X86::BI__builtin_ia32_shufps256: |
13385 | case X86::BI__builtin_ia32_shufps512: { |
13386 | uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13387 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13388 | unsigned NumElts = Ty->getNumElements(); |
13389 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; |
13390 | unsigned NumLaneElts = NumElts / NumLanes; |
13391 | |
13392 | |
13393 | Imm = (Imm & 0xff) * 0x01010101; |
13394 | |
13395 | int Indices[16]; |
13396 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13397 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13398 | unsigned Index = Imm % NumLaneElts; |
13399 | Imm /= NumLaneElts; |
13400 | if (i >= (NumLaneElts / 2)) |
13401 | Index += NumElts; |
13402 | Indices[l + i] = l + Index; |
13403 | } |
13404 | } |
13405 | |
13406 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13407 | makeArrayRef(Indices, NumElts), |
13408 | "shufp"); |
13409 | } |
13410 | case X86::BI__builtin_ia32_permdi256: |
13411 | case X86::BI__builtin_ia32_permdf256: |
13412 | case X86::BI__builtin_ia32_permdi512: |
13413 | case X86::BI__builtin_ia32_permdf512: { |
13414 | unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
13415 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13416 | unsigned NumElts = Ty->getNumElements(); |
13417 | |
13418 | |
13419 | int Indices[8]; |
13420 | for (unsigned l = 0; l != NumElts; l += 4) |
13421 | for (unsigned i = 0; i != 4; ++i) |
13422 | Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3); |
13423 | |
13424 | return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts), |
13425 | "perm"); |
13426 | } |
13427 | case X86::BI__builtin_ia32_palignr128: |
13428 | case X86::BI__builtin_ia32_palignr256: |
13429 | case X86::BI__builtin_ia32_palignr512: { |
13430 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13431 | |
13432 | unsigned NumElts = |
13433 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13434 | assert(NumElts % 16 == 0); |
13435 | |
13436 | |
13437 | |
13438 | if (ShiftVal >= 32) |
13439 | return llvm::Constant::getNullValue(ConvertType(E->getType())); |
13440 | |
13441 | |
13442 | |
13443 | if (ShiftVal > 16) { |
13444 | ShiftVal -= 16; |
13445 | Ops[1] = Ops[0]; |
13446 | Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); |
13447 | } |
13448 | |
13449 | int Indices[64]; |
13450 | |
13451 | for (unsigned l = 0; l != NumElts; l += 16) { |
13452 | for (unsigned i = 0; i != 16; ++i) { |
13453 | unsigned Idx = ShiftVal + i; |
13454 | if (Idx >= 16) |
13455 | Idx += NumElts - 16; |
13456 | Indices[l + i] = Idx + l; |
13457 | } |
13458 | } |
13459 | |
13460 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13461 | makeArrayRef(Indices, NumElts), |
13462 | "palignr"); |
13463 | } |
13464 | case X86::BI__builtin_ia32_alignd128: |
13465 | case X86::BI__builtin_ia32_alignd256: |
13466 | case X86::BI__builtin_ia32_alignd512: |
13467 | case X86::BI__builtin_ia32_alignq128: |
13468 | case X86::BI__builtin_ia32_alignq256: |
13469 | case X86::BI__builtin_ia32_alignq512: { |
13470 | unsigned NumElts = |
13471 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13472 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; |
13473 | |
13474 | |
13475 | ShiftVal &= NumElts - 1; |
13476 | |
13477 | int Indices[16]; |
13478 | for (unsigned i = 0; i != NumElts; ++i) |
13479 | Indices[i] = i + ShiftVal; |
13480 | |
13481 | return Builder.CreateShuffleVector(Ops[1], Ops[0], |
13482 | makeArrayRef(Indices, NumElts), |
13483 | "valign"); |
13484 | } |
13485 | case X86::BI__builtin_ia32_shuf_f32x4_256: |
13486 | case X86::BI__builtin_ia32_shuf_f64x2_256: |
13487 | case X86::BI__builtin_ia32_shuf_i32x4_256: |
13488 | case X86::BI__builtin_ia32_shuf_i64x2_256: |
13489 | case X86::BI__builtin_ia32_shuf_f32x4: |
13490 | case X86::BI__builtin_ia32_shuf_f64x2: |
13491 | case X86::BI__builtin_ia32_shuf_i32x4: |
13492 | case X86::BI__builtin_ia32_shuf_i64x2: { |
13493 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13494 | auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13495 | unsigned NumElts = Ty->getNumElements(); |
13496 | unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; |
13497 | unsigned NumLaneElts = NumElts / NumLanes; |
13498 | |
13499 | int Indices[16]; |
13500 | for (unsigned l = 0; l != NumElts; l += NumLaneElts) { |
13501 | unsigned Index = (Imm % NumLanes) * NumLaneElts; |
13502 | Imm /= NumLanes; |
13503 | if (l >= (NumElts / 2)) |
13504 | Index += NumElts; |
13505 | for (unsigned i = 0; i != NumLaneElts; ++i) { |
13506 | Indices[l + i] = Index + i; |
13507 | } |
13508 | } |
13509 | |
13510 | return Builder.CreateShuffleVector(Ops[0], Ops[1], |
13511 | makeArrayRef(Indices, NumElts), |
13512 | "shuf"); |
13513 | } |
13514 | |
13515 | case X86::BI__builtin_ia32_vperm2f128_pd256: |
13516 | case X86::BI__builtin_ia32_vperm2f128_ps256: |
13517 | case X86::BI__builtin_ia32_vperm2f128_si256: |
13518 | case X86::BI__builtin_ia32_permti256: { |
13519 | unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); |
13520 | unsigned NumElts = |
13521 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
13522 | |
13523 | |
13524 | |
13525 | |
13526 | |
13527 | |
13528 | Value *OutOps[2]; |
13529 | int Indices[8]; |
13530 | for (unsigned l = 0; l != 2; ++l) { |
13531 | |
13532 | if (Imm & (1 << ((l * 4) + 3))) |
13533 | OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); |
13534 | else if (Imm & (1 << ((l * 4) + 1))) |
13535 | OutOps[l] = Ops[1]; |
13536 | else |
13537 | OutOps[l] = Ops[0]; |
13538 | |
13539 | for (unsigned i = 0; i != NumElts/2; ++i) { |
13540 | |
13541 | unsigned Idx = (l * NumElts) + i; |
13542 | |
13543 | |
13544 | if (Imm & (1 << (l * 4))) |
13545 | Idx += NumElts/2; |
13546 | Indices[(l * (NumElts/2)) + i] = Idx; |
13547 | } |
13548 | } |
13549 | |
13550 | return Builder.CreateShuffleVector(OutOps[0], OutOps[1], |
13551 | makeArrayRef(Indices, NumElts), |
13552 | "vperm"); |
13553 | } |
13554 | |
13555 | case X86::BI__builtin_ia32_pslldqi128_byteshift: |
13556 | case X86::BI__builtin_ia32_pslldqi256_byteshift: |
13557 | case X86::BI__builtin_ia32_pslldqi512_byteshift: { |
13558 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13559 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13560 | |
13561 | unsigned NumElts = ResultType->getNumElements() * 8; |
13562 | |
13563 | |
13564 | if (ShiftVal >= 16) |
13565 | return llvm::Constant::getNullValue(ResultType); |
13566 | |
13567 | int Indices[64]; |
13568 | |
13569 | for (unsigned l = 0; l != NumElts; l += 16) { |
13570 | for (unsigned i = 0; i != 16; ++i) { |
13571 | unsigned Idx = NumElts + i - ShiftVal; |
13572 | if (Idx < NumElts) Idx -= NumElts - 16; |
13573 | Indices[l + i] = Idx + l; |
13574 | } |
13575 | } |
13576 | |
13577 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13578 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13579 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13580 | Value *SV = Builder.CreateShuffleVector(Zero, Cast, |
13581 | makeArrayRef(Indices, NumElts), |
13582 | "pslldq"); |
13583 | return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); |
13584 | } |
13585 | case X86::BI__builtin_ia32_psrldqi128_byteshift: |
13586 | case X86::BI__builtin_ia32_psrldqi256_byteshift: |
13587 | case X86::BI__builtin_ia32_psrldqi512_byteshift: { |
13588 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13589 | auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); |
13590 | |
13591 | unsigned NumElts = ResultType->getNumElements() * 8; |
13592 | |
13593 | |
13594 | if (ShiftVal >= 16) |
13595 | return llvm::Constant::getNullValue(ResultType); |
13596 | |
13597 | int Indices[64]; |
13598 | |
13599 | for (unsigned l = 0; l != NumElts; l += 16) { |
13600 | for (unsigned i = 0; i != 16; ++i) { |
13601 | unsigned Idx = i + ShiftVal; |
13602 | if (Idx >= 16) Idx += NumElts - 16; |
13603 | Indices[l + i] = Idx + l; |
13604 | } |
13605 | } |
13606 | |
13607 | auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); |
13608 | Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); |
13609 | Value *Zero = llvm::Constant::getNullValue(VecTy); |
13610 | Value *SV = Builder.CreateShuffleVector(Cast, Zero, |
13611 | makeArrayRef(Indices, NumElts), |
13612 | "psrldq"); |
13613 | return Builder.CreateBitCast(SV, ResultType, "cast"); |
13614 | } |
13615 | case X86::BI__builtin_ia32_kshiftliqi: |
13616 | case X86::BI__builtin_ia32_kshiftlihi: |
13617 | case X86::BI__builtin_ia32_kshiftlisi: |
13618 | case X86::BI__builtin_ia32_kshiftlidi: { |
13619 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13620 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13621 | |
13622 | if (ShiftVal >= NumElts) |
13623 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13624 | |
13625 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13626 | |
13627 | int Indices[64]; |
13628 | for (unsigned i = 0; i != NumElts; ++i) |
13629 | Indices[i] = NumElts + i - ShiftVal; |
13630 | |
13631 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
13632 | Value *SV = Builder.CreateShuffleVector(Zero, In, |
13633 | makeArrayRef(Indices, NumElts), |
13634 | "kshiftl"); |
13635 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
13636 | } |
13637 | case X86::BI__builtin_ia32_kshiftriqi: |
13638 | case X86::BI__builtin_ia32_kshiftrihi: |
13639 | case X86::BI__builtin_ia32_kshiftrisi: |
13640 | case X86::BI__builtin_ia32_kshiftridi: { |
13641 | unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; |
13642 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13643 | |
13644 | if (ShiftVal >= NumElts) |
13645 | return llvm::Constant::getNullValue(Ops[0]->getType()); |
13646 | |
13647 | Value *In = getMaskVecValue(*this, Ops[0], NumElts); |
13648 | |
13649 | int Indices[64]; |
13650 | for (unsigned i = 0; i != NumElts; ++i) |
13651 | Indices[i] = i + ShiftVal; |
13652 | |
13653 | Value *Zero = llvm::Constant::getNullValue(In->getType()); |
13654 | Value *SV = Builder.CreateShuffleVector(In, Zero, |
13655 | makeArrayRef(Indices, NumElts), |
13656 | "kshiftr"); |
13657 | return Builder.CreateBitCast(SV, Ops[0]->getType()); |
13658 | } |
13659 | case X86::BI__builtin_ia32_movnti: |
13660 | case X86::BI__builtin_ia32_movnti64: |
13661 | case X86::BI__builtin_ia32_movntsd: |
13662 | case X86::BI__builtin_ia32_movntss: { |
13663 | llvm::MDNode *Node = llvm::MDNode::get( |
13664 | getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); |
13665 | |
13666 | Value *Ptr = Ops[0]; |
13667 | Value *Src = Ops[1]; |
13668 | |
13669 | |
13670 | if (BuiltinID == X86::BI__builtin_ia32_movntsd || |
13671 | BuiltinID == X86::BI__builtin_ia32_movntss) |
13672 | Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract"); |
13673 | |
13674 | |
13675 | Value *BC = Builder.CreateBitCast( |
13676 | Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast"); |
13677 | |
13678 | |
13679 | StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC); |
13680 | SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); |
13681 | SI->setAlignment(llvm::Align(1)); |
13682 | return SI; |
13683 | } |
13684 | |
13685 | case X86::BI__builtin_ia32_vprotb: |
13686 | case X86::BI__builtin_ia32_vprotw: |
13687 | case X86::BI__builtin_ia32_vprotd: |
13688 | case X86::BI__builtin_ia32_vprotq: |
13689 | case X86::BI__builtin_ia32_vprotbi: |
13690 | case X86::BI__builtin_ia32_vprotwi: |
13691 | case X86::BI__builtin_ia32_vprotdi: |
13692 | case X86::BI__builtin_ia32_vprotqi: |
13693 | case X86::BI__builtin_ia32_prold128: |
13694 | case X86::BI__builtin_ia32_prold256: |
13695 | case X86::BI__builtin_ia32_prold512: |
13696 | case X86::BI__builtin_ia32_prolq128: |
13697 | case X86::BI__builtin_ia32_prolq256: |
13698 | case X86::BI__builtin_ia32_prolq512: |
13699 | case X86::BI__builtin_ia32_prolvd128: |
13700 | case X86::BI__builtin_ia32_prolvd256: |
13701 | case X86::BI__builtin_ia32_prolvd512: |
13702 | case X86::BI__builtin_ia32_prolvq128: |
13703 | case X86::BI__builtin_ia32_prolvq256: |
13704 | case X86::BI__builtin_ia32_prolvq512: |
13705 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false); |
13706 | case X86::BI__builtin_ia32_prord128: |
13707 | case X86::BI__builtin_ia32_prord256: |
13708 | case X86::BI__builtin_ia32_prord512: |
13709 | case X86::BI__builtin_ia32_prorq128: |
13710 | case X86::BI__builtin_ia32_prorq256: |
13711 | case X86::BI__builtin_ia32_prorq512: |
13712 | case X86::BI__builtin_ia32_prorvd128: |
13713 | case X86::BI__builtin_ia32_prorvd256: |
13714 | case X86::BI__builtin_ia32_prorvd512: |
13715 | case X86::BI__builtin_ia32_prorvq128: |
13716 | case X86::BI__builtin_ia32_prorvq256: |
13717 | case X86::BI__builtin_ia32_prorvq512: |
13718 | return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true); |
13719 | case X86::BI__builtin_ia32_selectb_128: |
13720 | case X86::BI__builtin_ia32_selectb_256: |
13721 | case X86::BI__builtin_ia32_selectb_512: |
13722 | case X86::BI__builtin_ia32_selectw_128: |
13723 | case X86::BI__builtin_ia32_selectw_256: |
13724 | case X86::BI__builtin_ia32_selectw_512: |
13725 | case X86::BI__builtin_ia32_selectd_128: |
13726 | case X86::BI__builtin_ia32_selectd_256: |
13727 | case X86::BI__builtin_ia32_selectd_512: |
13728 | case X86::BI__builtin_ia32_selectq_128: |
13729 | case X86::BI__builtin_ia32_selectq_256: |
13730 | case X86::BI__builtin_ia32_selectq_512: |
13731 | case X86::BI__builtin_ia32_selectps_128: |
13732 | case X86::BI__builtin_ia32_selectps_256: |
13733 | case X86::BI__builtin_ia32_selectps_512: |
13734 | case X86::BI__builtin_ia32_selectpd_128: |
13735 | case X86::BI__builtin_ia32_selectpd_256: |
13736 | case X86::BI__builtin_ia32_selectpd_512: |
13737 | return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]); |
13738 | case X86::BI__builtin_ia32_selectss_128: |
13739 | case X86::BI__builtin_ia32_selectsd_128: { |
13740 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
13741 | Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
13742 | A = EmitX86ScalarSelect(*this, Ops[0], A, B); |
13743 | return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0); |
13744 | } |
13745 | case X86::BI__builtin_ia32_cmpb128_mask: |
13746 | case X86::BI__builtin_ia32_cmpb256_mask: |
13747 | case X86::BI__builtin_ia32_cmpb512_mask: |
13748 | case X86::BI__builtin_ia32_cmpw128_mask: |
13749 | case X86::BI__builtin_ia32_cmpw256_mask: |
13750 | case X86::BI__builtin_ia32_cmpw512_mask: |
13751 | case X86::BI__builtin_ia32_cmpd128_mask: |
13752 | case X86::BI__builtin_ia32_cmpd256_mask: |
13753 | case X86::BI__builtin_ia32_cmpd512_mask: |
13754 | case X86::BI__builtin_ia32_cmpq128_mask: |
13755 | case X86::BI__builtin_ia32_cmpq256_mask: |
13756 | case X86::BI__builtin_ia32_cmpq512_mask: { |
13757 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
13758 | return EmitX86MaskedCompare(*this, CC, true, Ops); |
13759 | } |
13760 | case X86::BI__builtin_ia32_ucmpb128_mask: |
13761 | case X86::BI__builtin_ia32_ucmpb256_mask: |
13762 | case X86::BI__builtin_ia32_ucmpb512_mask: |
13763 | case X86::BI__builtin_ia32_ucmpw128_mask: |
13764 | case X86::BI__builtin_ia32_ucmpw256_mask: |
13765 | case X86::BI__builtin_ia32_ucmpw512_mask: |
13766 | case X86::BI__builtin_ia32_ucmpd128_mask: |
13767 | case X86::BI__builtin_ia32_ucmpd256_mask: |
13768 | case X86::BI__builtin_ia32_ucmpd512_mask: |
13769 | case X86::BI__builtin_ia32_ucmpq128_mask: |
13770 | case X86::BI__builtin_ia32_ucmpq256_mask: |
13771 | case X86::BI__builtin_ia32_ucmpq512_mask: { |
13772 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; |
13773 | return EmitX86MaskedCompare(*this, CC, false, Ops); |
13774 | } |
13775 | case X86::BI__builtin_ia32_vpcomb: |
13776 | case X86::BI__builtin_ia32_vpcomw: |
13777 | case X86::BI__builtin_ia32_vpcomd: |
13778 | case X86::BI__builtin_ia32_vpcomq: |
13779 | return EmitX86vpcom(*this, Ops, true); |
13780 | case X86::BI__builtin_ia32_vpcomub: |
13781 | case X86::BI__builtin_ia32_vpcomuw: |
13782 | case X86::BI__builtin_ia32_vpcomud: |
13783 | case X86::BI__builtin_ia32_vpcomuq: |
13784 | return EmitX86vpcom(*this, Ops, false); |
13785 | |
13786 | case X86::BI__builtin_ia32_kortestcqi: |
13787 | case X86::BI__builtin_ia32_kortestchi: |
13788 | case X86::BI__builtin_ia32_kortestcsi: |
13789 | case X86::BI__builtin_ia32_kortestcdi: { |
13790 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13791 | Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType()); |
13792 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
13793 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
13794 | } |
13795 | case X86::BI__builtin_ia32_kortestzqi: |
13796 | case X86::BI__builtin_ia32_kortestzhi: |
13797 | case X86::BI__builtin_ia32_kortestzsi: |
13798 | case X86::BI__builtin_ia32_kortestzdi: { |
13799 | Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13800 | Value *C = llvm::Constant::getNullValue(Ops[0]->getType()); |
13801 | Value *Cmp = Builder.CreateICmpEQ(Or, C); |
13802 | return Builder.CreateZExt(Cmp, ConvertType(E->getType())); |
13803 | } |
13804 | |
13805 | case X86::BI__builtin_ia32_ktestcqi: |
13806 | case X86::BI__builtin_ia32_ktestzqi: |
13807 | case X86::BI__builtin_ia32_ktestchi: |
13808 | case X86::BI__builtin_ia32_ktestzhi: |
13809 | case X86::BI__builtin_ia32_ktestcsi: |
13810 | case X86::BI__builtin_ia32_ktestzsi: |
13811 | case X86::BI__builtin_ia32_ktestcdi: |
13812 | case X86::BI__builtin_ia32_ktestzdi: { |
13813 | Intrinsic::ID IID; |
13814 | switch (BuiltinID) { |
13815 | default: llvm_unreachable("Unsupported intrinsic!"); |
13816 | case X86::BI__builtin_ia32_ktestcqi: |
13817 | IID = Intrinsic::x86_avx512_ktestc_b; |
13818 | break; |
13819 | case X86::BI__builtin_ia32_ktestzqi: |
13820 | IID = Intrinsic::x86_avx512_ktestz_b; |
13821 | break; |
13822 | case X86::BI__builtin_ia32_ktestchi: |
13823 | IID = Intrinsic::x86_avx512_ktestc_w; |
13824 | break; |
13825 | case X86::BI__builtin_ia32_ktestzhi: |
13826 | IID = Intrinsic::x86_avx512_ktestz_w; |
13827 | break; |
13828 | case X86::BI__builtin_ia32_ktestcsi: |
13829 | IID = Intrinsic::x86_avx512_ktestc_d; |
13830 | break; |
13831 | case X86::BI__builtin_ia32_ktestzsi: |
13832 | IID = Intrinsic::x86_avx512_ktestz_d; |
13833 | break; |
13834 | case X86::BI__builtin_ia32_ktestcdi: |
13835 | IID = Intrinsic::x86_avx512_ktestc_q; |
13836 | break; |
13837 | case X86::BI__builtin_ia32_ktestzdi: |
13838 | IID = Intrinsic::x86_avx512_ktestz_q; |
13839 | break; |
13840 | } |
13841 | |
13842 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13843 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13844 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13845 | Function *Intr = CGM.getIntrinsic(IID); |
13846 | return Builder.CreateCall(Intr, {LHS, RHS}); |
13847 | } |
13848 | |
13849 | case X86::BI__builtin_ia32_kaddqi: |
13850 | case X86::BI__builtin_ia32_kaddhi: |
13851 | case X86::BI__builtin_ia32_kaddsi: |
13852 | case X86::BI__builtin_ia32_kadddi: { |
13853 | Intrinsic::ID IID; |
13854 | switch (BuiltinID) { |
13855 | default: llvm_unreachable("Unsupported intrinsic!"); |
13856 | case X86::BI__builtin_ia32_kaddqi: |
13857 | IID = Intrinsic::x86_avx512_kadd_b; |
13858 | break; |
13859 | case X86::BI__builtin_ia32_kaddhi: |
13860 | IID = Intrinsic::x86_avx512_kadd_w; |
13861 | break; |
13862 | case X86::BI__builtin_ia32_kaddsi: |
13863 | IID = Intrinsic::x86_avx512_kadd_d; |
13864 | break; |
13865 | case X86::BI__builtin_ia32_kadddi: |
13866 | IID = Intrinsic::x86_avx512_kadd_q; |
13867 | break; |
13868 | } |
13869 | |
13870 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13871 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13872 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13873 | Function *Intr = CGM.getIntrinsic(IID); |
13874 | Value *Res = Builder.CreateCall(Intr, {LHS, RHS}); |
13875 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13876 | } |
13877 | case X86::BI__builtin_ia32_kandqi: |
13878 | case X86::BI__builtin_ia32_kandhi: |
13879 | case X86::BI__builtin_ia32_kandsi: |
13880 | case X86::BI__builtin_ia32_kanddi: |
13881 | return EmitX86MaskLogic(*this, Instruction::And, Ops); |
13882 | case X86::BI__builtin_ia32_kandnqi: |
13883 | case X86::BI__builtin_ia32_kandnhi: |
13884 | case X86::BI__builtin_ia32_kandnsi: |
13885 | case X86::BI__builtin_ia32_kandndi: |
13886 | return EmitX86MaskLogic(*this, Instruction::And, Ops, true); |
13887 | case X86::BI__builtin_ia32_korqi: |
13888 | case X86::BI__builtin_ia32_korhi: |
13889 | case X86::BI__builtin_ia32_korsi: |
13890 | case X86::BI__builtin_ia32_kordi: |
13891 | return EmitX86MaskLogic(*this, Instruction::Or, Ops); |
13892 | case X86::BI__builtin_ia32_kxnorqi: |
13893 | case X86::BI__builtin_ia32_kxnorhi: |
13894 | case X86::BI__builtin_ia32_kxnorsi: |
13895 | case X86::BI__builtin_ia32_kxnordi: |
13896 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true); |
13897 | case X86::BI__builtin_ia32_kxorqi: |
13898 | case X86::BI__builtin_ia32_kxorhi: |
13899 | case X86::BI__builtin_ia32_kxorsi: |
13900 | case X86::BI__builtin_ia32_kxordi: |
13901 | return EmitX86MaskLogic(*this, Instruction::Xor, Ops); |
13902 | case X86::BI__builtin_ia32_knotqi: |
13903 | case X86::BI__builtin_ia32_knothi: |
13904 | case X86::BI__builtin_ia32_knotsi: |
13905 | case X86::BI__builtin_ia32_knotdi: { |
13906 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13907 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
13908 | return Builder.CreateBitCast(Builder.CreateNot(Res), |
13909 | Ops[0]->getType()); |
13910 | } |
13911 | case X86::BI__builtin_ia32_kmovb: |
13912 | case X86::BI__builtin_ia32_kmovw: |
13913 | case X86::BI__builtin_ia32_kmovd: |
13914 | case X86::BI__builtin_ia32_kmovq: { |
13915 | |
13916 | |
13917 | |
13918 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13919 | Value *Res = getMaskVecValue(*this, Ops[0], NumElts); |
13920 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13921 | } |
13922 | |
13923 | case X86::BI__builtin_ia32_kunpckdi: |
13924 | case X86::BI__builtin_ia32_kunpcksi: |
13925 | case X86::BI__builtin_ia32_kunpckhi: { |
13926 | unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); |
13927 | Value *LHS = getMaskVecValue(*this, Ops[0], NumElts); |
13928 | Value *RHS = getMaskVecValue(*this, Ops[1], NumElts); |
13929 | int Indices[64]; |
13930 | for (unsigned i = 0; i != NumElts; ++i) |
13931 | Indices[i] = i; |
13932 | |
13933 | |
13934 | |
13935 | LHS = Builder.CreateShuffleVector(LHS, LHS, |
13936 | makeArrayRef(Indices, NumElts / 2)); |
13937 | RHS = Builder.CreateShuffleVector(RHS, RHS, |
13938 | makeArrayRef(Indices, NumElts / 2)); |
13939 | |
13940 | |
13941 | Value *Res = Builder.CreateShuffleVector(RHS, LHS, |
13942 | makeArrayRef(Indices, NumElts)); |
13943 | return Builder.CreateBitCast(Res, Ops[0]->getType()); |
13944 | } |
13945 | |
13946 | case X86::BI__builtin_ia32_vplzcntd_128: |
13947 | case X86::BI__builtin_ia32_vplzcntd_256: |
13948 | case X86::BI__builtin_ia32_vplzcntd_512: |
13949 | case X86::BI__builtin_ia32_vplzcntq_128: |
13950 | case X86::BI__builtin_ia32_vplzcntq_256: |
13951 | case X86::BI__builtin_ia32_vplzcntq_512: { |
13952 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); |
13953 | return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); |
13954 | } |
13955 | case X86::BI__builtin_ia32_sqrtss: |
13956 | case X86::BI__builtin_ia32_sqrtsd: { |
13957 | Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0); |
13958 | Function *F; |
13959 | if (Builder.getIsFPConstrained()) { |
13960 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
13961 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
13962 | A->getType()); |
13963 | A = Builder.CreateConstrainedFPCall(F, {A}); |
13964 | } else { |
13965 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
13966 | A = Builder.CreateCall(F, {A}); |
13967 | } |
13968 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
13969 | } |
13970 | case X86::BI__builtin_ia32_sqrtsd_round_mask: |
13971 | case X86::BI__builtin_ia32_sqrtss_round_mask: { |
13972 | unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); |
13973 | |
13974 | |
13975 | if (CC != 4) { |
13976 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ? |
13977 | Intrinsic::x86_avx512_mask_sqrt_sd : |
13978 | Intrinsic::x86_avx512_mask_sqrt_ss; |
13979 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
13980 | } |
13981 | Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0); |
13982 | Function *F; |
13983 | if (Builder.getIsFPConstrained()) { |
13984 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
13985 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
13986 | A->getType()); |
13987 | A = Builder.CreateConstrainedFPCall(F, A); |
13988 | } else { |
13989 | F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); |
13990 | A = Builder.CreateCall(F, A); |
13991 | } |
13992 | Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0); |
13993 | A = EmitX86ScalarSelect(*this, Ops[3], A, Src); |
13994 | return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0); |
13995 | } |
13996 | case X86::BI__builtin_ia32_sqrtpd256: |
13997 | case X86::BI__builtin_ia32_sqrtpd: |
13998 | case X86::BI__builtin_ia32_sqrtps256: |
13999 | case X86::BI__builtin_ia32_sqrtps: |
14000 | case X86::BI__builtin_ia32_sqrtps512: |
14001 | case X86::BI__builtin_ia32_sqrtpd512: { |
14002 | if (Ops.size() == 2) { |
14003 | unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); |
14004 | |
14005 | |
14006 | if (CC != 4) { |
14007 | Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ? |
14008 | Intrinsic::x86_avx512_sqrt_ps_512 : |
14009 | Intrinsic::x86_avx512_sqrt_pd_512; |
14010 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14011 | } |
14012 | } |
14013 | if (Builder.getIsFPConstrained()) { |
14014 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14015 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, |
14016 | Ops[0]->getType()); |
14017 | return Builder.CreateConstrainedFPCall(F, Ops[0]); |
14018 | } else { |
14019 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); |
14020 | return Builder.CreateCall(F, Ops[0]); |
14021 | } |
14022 | } |
14023 | case X86::BI__builtin_ia32_pabsb128: |
14024 | case X86::BI__builtin_ia32_pabsw128: |
14025 | case X86::BI__builtin_ia32_pabsd128: |
14026 | case X86::BI__builtin_ia32_pabsb256: |
14027 | case X86::BI__builtin_ia32_pabsw256: |
14028 | case X86::BI__builtin_ia32_pabsd256: |
14029 | case X86::BI__builtin_ia32_pabsq128: |
14030 | case X86::BI__builtin_ia32_pabsq256: |
14031 | case X86::BI__builtin_ia32_pabsb512: |
14032 | case X86::BI__builtin_ia32_pabsw512: |
14033 | case X86::BI__builtin_ia32_pabsd512: |
14034 | case X86::BI__builtin_ia32_pabsq512: { |
14035 | Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType()); |
14036 | return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); |
14037 | } |
14038 | case X86::BI__builtin_ia32_pmaxsb128: |
14039 | case X86::BI__builtin_ia32_pmaxsw128: |
14040 | case X86::BI__builtin_ia32_pmaxsd128: |
14041 | case X86::BI__builtin_ia32_pmaxsq128: |
14042 | case X86::BI__builtin_ia32_pmaxsb256: |
14043 | case X86::BI__builtin_ia32_pmaxsw256: |
14044 | case X86::BI__builtin_ia32_pmaxsd256: |
14045 | case X86::BI__builtin_ia32_pmaxsq256: |
14046 | case X86::BI__builtin_ia32_pmaxsb512: |
14047 | case X86::BI__builtin_ia32_pmaxsw512: |
14048 | case X86::BI__builtin_ia32_pmaxsd512: |
14049 | case X86::BI__builtin_ia32_pmaxsq512: |
14050 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax); |
14051 | case X86::BI__builtin_ia32_pmaxub128: |
14052 | case X86::BI__builtin_ia32_pmaxuw128: |
14053 | case X86::BI__builtin_ia32_pmaxud128: |
14054 | case X86::BI__builtin_ia32_pmaxuq128: |
14055 | case X86::BI__builtin_ia32_pmaxub256: |
14056 | case X86::BI__builtin_ia32_pmaxuw256: |
14057 | case X86::BI__builtin_ia32_pmaxud256: |
14058 | case X86::BI__builtin_ia32_pmaxuq256: |
14059 | case X86::BI__builtin_ia32_pmaxub512: |
14060 | case X86::BI__builtin_ia32_pmaxuw512: |
14061 | case X86::BI__builtin_ia32_pmaxud512: |
14062 | case X86::BI__builtin_ia32_pmaxuq512: |
14063 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax); |
14064 | case X86::BI__builtin_ia32_pminsb128: |
14065 | case X86::BI__builtin_ia32_pminsw128: |
14066 | case X86::BI__builtin_ia32_pminsd128: |
14067 | case X86::BI__builtin_ia32_pminsq128: |
14068 | case X86::BI__builtin_ia32_pminsb256: |
14069 | case X86::BI__builtin_ia32_pminsw256: |
14070 | case X86::BI__builtin_ia32_pminsd256: |
14071 | case X86::BI__builtin_ia32_pminsq256: |
14072 | case X86::BI__builtin_ia32_pminsb512: |
14073 | case X86::BI__builtin_ia32_pminsw512: |
14074 | case X86::BI__builtin_ia32_pminsd512: |
14075 | case X86::BI__builtin_ia32_pminsq512: |
14076 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin); |
14077 | case X86::BI__builtin_ia32_pminub128: |
14078 | case X86::BI__builtin_ia32_pminuw128: |
14079 | case X86::BI__builtin_ia32_pminud128: |
14080 | case X86::BI__builtin_ia32_pminuq128: |
14081 | case X86::BI__builtin_ia32_pminub256: |
14082 | case X86::BI__builtin_ia32_pminuw256: |
14083 | case X86::BI__builtin_ia32_pminud256: |
14084 | case X86::BI__builtin_ia32_pminuq256: |
14085 | case X86::BI__builtin_ia32_pminub512: |
14086 | case X86::BI__builtin_ia32_pminuw512: |
14087 | case X86::BI__builtin_ia32_pminud512: |
14088 | case X86::BI__builtin_ia32_pminuq512: |
14089 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin); |
14090 | |
14091 | case X86::BI__builtin_ia32_pmuludq128: |
14092 | case X86::BI__builtin_ia32_pmuludq256: |
14093 | case X86::BI__builtin_ia32_pmuludq512: |
14094 | return EmitX86Muldq(*this, false, Ops); |
14095 | |
14096 | case X86::BI__builtin_ia32_pmuldq128: |
14097 | case X86::BI__builtin_ia32_pmuldq256: |
14098 | case X86::BI__builtin_ia32_pmuldq512: |
14099 | return EmitX86Muldq(*this, true, Ops); |
14100 | |
14101 | case X86::BI__builtin_ia32_pternlogd512_mask: |
14102 | case X86::BI__builtin_ia32_pternlogq512_mask: |
14103 | case X86::BI__builtin_ia32_pternlogd128_mask: |
14104 | case X86::BI__builtin_ia32_pternlogd256_mask: |
14105 | case X86::BI__builtin_ia32_pternlogq128_mask: |
14106 | case X86::BI__builtin_ia32_pternlogq256_mask: |
14107 | return EmitX86Ternlog(*this, false, Ops); |
14108 | |
14109 | case X86::BI__builtin_ia32_pternlogd512_maskz: |
14110 | case X86::BI__builtin_ia32_pternlogq512_maskz: |
14111 | case X86::BI__builtin_ia32_pternlogd128_maskz: |
14112 | case X86::BI__builtin_ia32_pternlogd256_maskz: |
14113 | case X86::BI__builtin_ia32_pternlogq128_maskz: |
14114 | case X86::BI__builtin_ia32_pternlogq256_maskz: |
14115 | return EmitX86Ternlog(*this, true, Ops); |
14116 | |
14117 | case X86::BI__builtin_ia32_vpshldd128: |
14118 | case X86::BI__builtin_ia32_vpshldd256: |
14119 | case X86::BI__builtin_ia32_vpshldd512: |
14120 | case X86::BI__builtin_ia32_vpshldq128: |
14121 | case X86::BI__builtin_ia32_vpshldq256: |
14122 | case X86::BI__builtin_ia32_vpshldq512: |
14123 | case X86::BI__builtin_ia32_vpshldw128: |
14124 | case X86::BI__builtin_ia32_vpshldw256: |
14125 | case X86::BI__builtin_ia32_vpshldw512: |
14126 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14127 | |
14128 | case X86::BI__builtin_ia32_vpshrdd128: |
14129 | case X86::BI__builtin_ia32_vpshrdd256: |
14130 | case X86::BI__builtin_ia32_vpshrdd512: |
14131 | case X86::BI__builtin_ia32_vpshrdq128: |
14132 | case X86::BI__builtin_ia32_vpshrdq256: |
14133 | case X86::BI__builtin_ia32_vpshrdq512: |
14134 | case X86::BI__builtin_ia32_vpshrdw128: |
14135 | case X86::BI__builtin_ia32_vpshrdw256: |
14136 | case X86::BI__builtin_ia32_vpshrdw512: |
14137 | |
14138 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14139 | |
14140 | case X86::BI__builtin_ia32_vpshldvd128: |
14141 | case X86::BI__builtin_ia32_vpshldvd256: |
14142 | case X86::BI__builtin_ia32_vpshldvd512: |
14143 | case X86::BI__builtin_ia32_vpshldvq128: |
14144 | case X86::BI__builtin_ia32_vpshldvq256: |
14145 | case X86::BI__builtin_ia32_vpshldvq512: |
14146 | case X86::BI__builtin_ia32_vpshldvw128: |
14147 | case X86::BI__builtin_ia32_vpshldvw256: |
14148 | case X86::BI__builtin_ia32_vpshldvw512: |
14149 | return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false); |
14150 | |
14151 | case X86::BI__builtin_ia32_vpshrdvd128: |
14152 | case X86::BI__builtin_ia32_vpshrdvd256: |
14153 | case X86::BI__builtin_ia32_vpshrdvd512: |
14154 | case X86::BI__builtin_ia32_vpshrdvq128: |
14155 | case X86::BI__builtin_ia32_vpshrdvq256: |
14156 | case X86::BI__builtin_ia32_vpshrdvq512: |
14157 | case X86::BI__builtin_ia32_vpshrdvw128: |
14158 | case X86::BI__builtin_ia32_vpshrdvw256: |
14159 | case X86::BI__builtin_ia32_vpshrdvw512: |
14160 | |
14161 | return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true); |
14162 | |
14163 | |
14164 | case X86::BI__builtin_ia32_reduce_add_d512: |
14165 | case X86::BI__builtin_ia32_reduce_add_q512: { |
14166 | Function *F = |
14167 | CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType()); |
14168 | return Builder.CreateCall(F, {Ops[0]}); |
14169 | } |
14170 | case X86::BI__builtin_ia32_reduce_and_d512: |
14171 | case X86::BI__builtin_ia32_reduce_and_q512: { |
14172 | Function *F = |
14173 | CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType()); |
14174 | return Builder.CreateCall(F, {Ops[0]}); |
14175 | } |
14176 | case X86::BI__builtin_ia32_reduce_fadd_pd512: |
14177 | case X86::BI__builtin_ia32_reduce_fadd_ps512: { |
14178 | Function *F = |
14179 | CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType()); |
14180 | Builder.getFastMathFlags().setAllowReassoc(); |
14181 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14182 | } |
14183 | case X86::BI__builtin_ia32_reduce_fmul_pd512: |
14184 | case X86::BI__builtin_ia32_reduce_fmul_ps512: { |
14185 | Function *F = |
14186 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType()); |
14187 | Builder.getFastMathFlags().setAllowReassoc(); |
14188 | return Builder.CreateCall(F, {Ops[0], Ops[1]}); |
14189 | } |
14190 | case X86::BI__builtin_ia32_reduce_fmax_pd512: |
14191 | case X86::BI__builtin_ia32_reduce_fmax_ps512: { |
14192 | Function *F = |
14193 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType()); |
14194 | Builder.getFastMathFlags().setNoNaNs(); |
14195 | return Builder.CreateCall(F, {Ops[0]}); |
14196 | } |
14197 | case X86::BI__builtin_ia32_reduce_fmin_pd512: |
14198 | case X86::BI__builtin_ia32_reduce_fmin_ps512: { |
14199 | Function *F = |
14200 | CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType()); |
14201 | Builder.getFastMathFlags().setNoNaNs(); |
14202 | return Builder.CreateCall(F, {Ops[0]}); |
14203 | } |
14204 | case X86::BI__builtin_ia32_reduce_mul_d512: |
14205 | case X86::BI__builtin_ia32_reduce_mul_q512: { |
14206 | Function *F = |
14207 | CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType()); |
14208 | return Builder.CreateCall(F, {Ops[0]}); |
14209 | } |
14210 | case X86::BI__builtin_ia32_reduce_or_d512: |
14211 | case X86::BI__builtin_ia32_reduce_or_q512: { |
14212 | Function *F = |
14213 | CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType()); |
14214 | return Builder.CreateCall(F, {Ops[0]}); |
14215 | } |
14216 | case X86::BI__builtin_ia32_reduce_smax_d512: |
14217 | case X86::BI__builtin_ia32_reduce_smax_q512: { |
14218 | Function *F = |
14219 | CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType()); |
14220 | return Builder.CreateCall(F, {Ops[0]}); |
14221 | } |
14222 | case X86::BI__builtin_ia32_reduce_smin_d512: |
14223 | case X86::BI__builtin_ia32_reduce_smin_q512: { |
14224 | Function *F = |
14225 | CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType()); |
14226 | return Builder.CreateCall(F, {Ops[0]}); |
14227 | } |
14228 | case X86::BI__builtin_ia32_reduce_umax_d512: |
14229 | case X86::BI__builtin_ia32_reduce_umax_q512: { |
14230 | Function *F = |
14231 | CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType()); |
14232 | return Builder.CreateCall(F, {Ops[0]}); |
14233 | } |
14234 | case X86::BI__builtin_ia32_reduce_umin_d512: |
14235 | case X86::BI__builtin_ia32_reduce_umin_q512: { |
14236 | Function *F = |
14237 | CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType()); |
14238 | return Builder.CreateCall(F, {Ops[0]}); |
14239 | } |
14240 | |
14241 | |
14242 | case X86::BI__builtin_ia32_pswapdsf: |
14243 | case X86::BI__builtin_ia32_pswapdsi: { |
14244 | llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); |
14245 | Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); |
14246 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); |
14247 | return Builder.CreateCall(F, Ops, "pswapd"); |
14248 | } |
14249 | case X86::BI__builtin_ia32_rdrand16_step: |
14250 | case X86::BI__builtin_ia32_rdrand32_step: |
14251 | case X86::BI__builtin_ia32_rdrand64_step: |
14252 | case X86::BI__builtin_ia32_rdseed16_step: |
14253 | case X86::BI__builtin_ia32_rdseed32_step: |
14254 | case X86::BI__builtin_ia32_rdseed64_step: { |
14255 | Intrinsic::ID ID; |
14256 | switch (BuiltinID) { |
14257 | default: llvm_unreachable("Unsupported intrinsic!"); |
14258 | case X86::BI__builtin_ia32_rdrand16_step: |
14259 | ID = Intrinsic::x86_rdrand_16; |
14260 | break; |
14261 | case X86::BI__builtin_ia32_rdrand32_step: |
14262 | ID = Intrinsic::x86_rdrand_32; |
14263 | break; |
14264 | case X86::BI__builtin_ia32_rdrand64_step: |
14265 | ID = Intrinsic::x86_rdrand_64; |
14266 | break; |
14267 | case X86::BI__builtin_ia32_rdseed16_step: |
14268 | ID = Intrinsic::x86_rdseed_16; |
14269 | break; |
14270 | case X86::BI__builtin_ia32_rdseed32_step: |
14271 | ID = Intrinsic::x86_rdseed_32; |
14272 | break; |
14273 | case X86::BI__builtin_ia32_rdseed64_step: |
14274 | ID = Intrinsic::x86_rdseed_64; |
14275 | break; |
14276 | } |
14277 | |
14278 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); |
14279 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0), |
14280 | Ops[0]); |
14281 | return Builder.CreateExtractValue(Call, 1); |
14282 | } |
14283 | case X86::BI__builtin_ia32_addcarryx_u32: |
14284 | case X86::BI__builtin_ia32_addcarryx_u64: |
14285 | case X86::BI__builtin_ia32_subborrow_u32: |
14286 | case X86::BI__builtin_ia32_subborrow_u64: { |
14287 | Intrinsic::ID IID; |
14288 | switch (BuiltinID) { |
14289 | default: llvm_unreachable("Unsupported intrinsic!"); |
14290 | case X86::BI__builtin_ia32_addcarryx_u32: |
14291 | IID = Intrinsic::x86_addcarry_32; |
14292 | break; |
14293 | case X86::BI__builtin_ia32_addcarryx_u64: |
14294 | IID = Intrinsic::x86_addcarry_64; |
14295 | break; |
14296 | case X86::BI__builtin_ia32_subborrow_u32: |
14297 | IID = Intrinsic::x86_subborrow_32; |
14298 | break; |
14299 | case X86::BI__builtin_ia32_subborrow_u64: |
14300 | IID = Intrinsic::x86_subborrow_64; |
14301 | break; |
14302 | } |
14303 | |
14304 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), |
14305 | { Ops[0], Ops[1], Ops[2] }); |
14306 | Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), |
14307 | Ops[3]); |
14308 | return Builder.CreateExtractValue(Call, 0); |
14309 | } |
14310 | |
14311 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14312 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14313 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14314 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14315 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14316 | case X86::BI__builtin_ia32_fpclasspd512_mask: { |
14317 | unsigned NumElts = |
14318 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14319 | Value *MaskIn = Ops[2]; |
14320 | Ops.erase(&Ops[2]); |
14321 | |
14322 | Intrinsic::ID ID; |
14323 | switch (BuiltinID) { |
14324 | default: llvm_unreachable("Unsupported intrinsic!"); |
14325 | case X86::BI__builtin_ia32_fpclassps128_mask: |
14326 | ID = Intrinsic::x86_avx512_fpclass_ps_128; |
14327 | break; |
14328 | case X86::BI__builtin_ia32_fpclassps256_mask: |
14329 | ID = Intrinsic::x86_avx512_fpclass_ps_256; |
14330 | break; |
14331 | case X86::BI__builtin_ia32_fpclassps512_mask: |
14332 | ID = Intrinsic::x86_avx512_fpclass_ps_512; |
14333 | break; |
14334 | case X86::BI__builtin_ia32_fpclasspd128_mask: |
14335 | ID = Intrinsic::x86_avx512_fpclass_pd_128; |
14336 | break; |
14337 | case X86::BI__builtin_ia32_fpclasspd256_mask: |
14338 | ID = Intrinsic::x86_avx512_fpclass_pd_256; |
14339 | break; |
14340 | case X86::BI__builtin_ia32_fpclasspd512_mask: |
14341 | ID = Intrinsic::x86_avx512_fpclass_pd_512; |
14342 | break; |
14343 | } |
14344 | |
14345 | Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14346 | return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn); |
14347 | } |
14348 | |
14349 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14350 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14351 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14352 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14353 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14354 | case X86::BI__builtin_ia32_vp2intersect_d_128: { |
14355 | unsigned NumElts = |
14356 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14357 | Intrinsic::ID ID; |
14358 | |
14359 | switch (BuiltinID) { |
14360 | default: llvm_unreachable("Unsupported intrinsic!"); |
14361 | case X86::BI__builtin_ia32_vp2intersect_q_512: |
14362 | ID = Intrinsic::x86_avx512_vp2intersect_q_512; |
14363 | break; |
14364 | case X86::BI__builtin_ia32_vp2intersect_q_256: |
14365 | ID = Intrinsic::x86_avx512_vp2intersect_q_256; |
14366 | break; |
14367 | case X86::BI__builtin_ia32_vp2intersect_q_128: |
14368 | ID = Intrinsic::x86_avx512_vp2intersect_q_128; |
14369 | break; |
14370 | case X86::BI__builtin_ia32_vp2intersect_d_512: |
14371 | ID = Intrinsic::x86_avx512_vp2intersect_d_512; |
14372 | break; |
14373 | case X86::BI__builtin_ia32_vp2intersect_d_256: |
14374 | ID = Intrinsic::x86_avx512_vp2intersect_d_256; |
14375 | break; |
14376 | case X86::BI__builtin_ia32_vp2intersect_d_128: |
14377 | ID = Intrinsic::x86_avx512_vp2intersect_d_128; |
14378 | break; |
14379 | } |
14380 | |
14381 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]}); |
14382 | Value *Result = Builder.CreateExtractValue(Call, 0); |
14383 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14384 | Builder.CreateDefaultAlignedStore(Result, Ops[2]); |
14385 | |
14386 | Result = Builder.CreateExtractValue(Call, 1); |
14387 | Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr); |
14388 | return Builder.CreateDefaultAlignedStore(Result, Ops[3]); |
14389 | } |
14390 | |
14391 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14392 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14393 | case X86::BI__builtin_ia32_vpmultishiftqb512: { |
14394 | Intrinsic::ID ID; |
14395 | switch (BuiltinID) { |
14396 | default: llvm_unreachable("Unsupported intrinsic!"); |
14397 | case X86::BI__builtin_ia32_vpmultishiftqb128: |
14398 | ID = Intrinsic::x86_avx512_pmultishift_qb_128; |
14399 | break; |
14400 | case X86::BI__builtin_ia32_vpmultishiftqb256: |
14401 | ID = Intrinsic::x86_avx512_pmultishift_qb_256; |
14402 | break; |
14403 | case X86::BI__builtin_ia32_vpmultishiftqb512: |
14404 | ID = Intrinsic::x86_avx512_pmultishift_qb_512; |
14405 | break; |
14406 | } |
14407 | |
14408 | return Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14409 | } |
14410 | |
14411 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14412 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14413 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { |
14414 | unsigned NumElts = |
14415 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14416 | Value *MaskIn = Ops[2]; |
14417 | Ops.erase(&Ops[2]); |
14418 | |
14419 | Intrinsic::ID ID; |
14420 | switch (BuiltinID) { |
14421 | default: llvm_unreachable("Unsupported intrinsic!"); |
14422 | case X86::BI__builtin_ia32_vpshufbitqmb128_mask: |
14423 | ID = Intrinsic::x86_avx512_vpshufbitqmb_128; |
14424 | break; |
14425 | case X86::BI__builtin_ia32_vpshufbitqmb256_mask: |
14426 | ID = Intrinsic::x86_avx512_vpshufbitqmb_256; |
14427 | break; |
14428 | case X86::BI__builtin_ia32_vpshufbitqmb512_mask: |
14429 | ID = Intrinsic::x86_avx512_vpshufbitqmb_512; |
14430 | break; |
14431 | } |
14432 | |
14433 | Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops); |
14434 | return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn); |
14435 | } |
14436 | |
14437 | |
14438 | case X86::BI__builtin_ia32_cmpeqps: |
14439 | case X86::BI__builtin_ia32_cmpeqpd: |
14440 | return getVectorFCmpIR(CmpInst::FCMP_OEQ, false); |
14441 | case X86::BI__builtin_ia32_cmpltps: |
14442 | case X86::BI__builtin_ia32_cmpltpd: |
14443 | return getVectorFCmpIR(CmpInst::FCMP_OLT, true); |
14444 | case X86::BI__builtin_ia32_cmpleps: |
14445 | case X86::BI__builtin_ia32_cmplepd: |
14446 | return getVectorFCmpIR(CmpInst::FCMP_OLE, true); |
14447 | case X86::BI__builtin_ia32_cmpunordps: |
14448 | case X86::BI__builtin_ia32_cmpunordpd: |
14449 | return getVectorFCmpIR(CmpInst::FCMP_UNO, false); |
14450 | case X86::BI__builtin_ia32_cmpneqps: |
14451 | case X86::BI__builtin_ia32_cmpneqpd: |
14452 | return getVectorFCmpIR(CmpInst::FCMP_UNE, false); |
14453 | case X86::BI__builtin_ia32_cmpnltps: |
14454 | case X86::BI__builtin_ia32_cmpnltpd: |
14455 | return getVectorFCmpIR(CmpInst::FCMP_UGE, true); |
14456 | case X86::BI__builtin_ia32_cmpnleps: |
14457 | case X86::BI__builtin_ia32_cmpnlepd: |
14458 | return getVectorFCmpIR(CmpInst::FCMP_UGT, true); |
14459 | case X86::BI__builtin_ia32_cmpordps: |
14460 | case X86::BI__builtin_ia32_cmpordpd: |
14461 | return getVectorFCmpIR(CmpInst::FCMP_ORD, false); |
14462 | case X86::BI__builtin_ia32_cmpps128_mask: |
14463 | case X86::BI__builtin_ia32_cmpps256_mask: |
14464 | case X86::BI__builtin_ia32_cmpps512_mask: |
14465 | case X86::BI__builtin_ia32_cmppd128_mask: |
14466 | case X86::BI__builtin_ia32_cmppd256_mask: |
14467 | case X86::BI__builtin_ia32_cmppd512_mask: |
14468 | IsMaskFCmp = true; |
14469 | LLVM_FALLTHROUGH; |
14470 | case X86::BI__builtin_ia32_cmpps: |
14471 | case X86::BI__builtin_ia32_cmpps256: |
14472 | case X86::BI__builtin_ia32_cmppd: |
14473 | case X86::BI__builtin_ia32_cmppd256: { |
14474 | |
14475 | |
14476 | |
14477 | |
14478 | |
14479 | |
14480 | |
14481 | unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; |
14482 | |
14483 | |
14484 | |
14485 | |
14486 | FCmpInst::Predicate Pred; |
14487 | bool IsSignaling; |
14488 | |
14489 | |
14490 | switch (CC & 0xf) { |
14491 | case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break; |
14492 | case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break; |
14493 | case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break; |
14494 | case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break; |
14495 | case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break; |
14496 | case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break; |
14497 | case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break; |
14498 | case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break; |
14499 | case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break; |
14500 | case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break; |
14501 | case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break; |
14502 | case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break; |
14503 | case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break; |
14504 | case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break; |
14505 | case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break; |
14506 | case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break; |
14507 | default: llvm_unreachable("Unhandled CC"); |
14508 | } |
14509 | |
14510 | |
14511 | if (CC & 0x10) |
14512 | IsSignaling = !IsSignaling; |
14513 | |
14514 | |
14515 | |
14516 | |
14517 | |
14518 | |
14519 | if (Builder.getIsFPConstrained() && |
14520 | (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE || |
14521 | IsMaskFCmp)) { |
14522 | |
14523 | Intrinsic::ID IID; |
14524 | switch (BuiltinID) { |
14525 | default: llvm_unreachable("Unexpected builtin"); |
14526 | case X86::BI__builtin_ia32_cmpps: |
14527 | IID = Intrinsic::x86_sse_cmp_ps; |
14528 | break; |
14529 | case X86::BI__builtin_ia32_cmpps256: |
14530 | IID = Intrinsic::x86_avx_cmp_ps_256; |
14531 | break; |
14532 | case X86::BI__builtin_ia32_cmppd: |
14533 | IID = Intrinsic::x86_sse2_cmp_pd; |
14534 | break; |
14535 | case X86::BI__builtin_ia32_cmppd256: |
14536 | IID = Intrinsic::x86_avx_cmp_pd_256; |
14537 | break; |
14538 | case X86::BI__builtin_ia32_cmpps512_mask: |
14539 | IID = Intrinsic::x86_avx512_mask_cmp_ps_512; |
14540 | break; |
14541 | case X86::BI__builtin_ia32_cmppd512_mask: |
14542 | IID = Intrinsic::x86_avx512_mask_cmp_pd_512; |
14543 | break; |
14544 | case X86::BI__builtin_ia32_cmpps128_mask: |
14545 | IID = Intrinsic::x86_avx512_mask_cmp_ps_128; |
14546 | break; |
14547 | case X86::BI__builtin_ia32_cmpps256_mask: |
14548 | IID = Intrinsic::x86_avx512_mask_cmp_ps_256; |
14549 | break; |
14550 | case X86::BI__builtin_ia32_cmppd128_mask: |
14551 | IID = Intrinsic::x86_avx512_mask_cmp_pd_128; |
14552 | break; |
14553 | case X86::BI__builtin_ia32_cmppd256_mask: |
14554 | IID = Intrinsic::x86_avx512_mask_cmp_pd_256; |
14555 | break; |
14556 | } |
14557 | |
14558 | Function *Intr = CGM.getIntrinsic(IID); |
14559 | if (IsMaskFCmp) { |
14560 | unsigned NumElts = |
14561 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14562 | Ops[3] = getMaskVecValue(*this, Ops[3], NumElts); |
14563 | Value *Cmp = Builder.CreateCall(Intr, Ops); |
14564 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr); |
14565 | } |
14566 | |
14567 | return Builder.CreateCall(Intr, Ops); |
14568 | } |
14569 | |
14570 | |
14571 | |
14572 | if (IsMaskFCmp) { |
14573 | |
14574 | |
14575 | |
14576 | |
14577 | unsigned NumElts = |
14578 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); |
14579 | Value *Cmp; |
14580 | if (IsSignaling) |
14581 | Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); |
14582 | else |
14583 | Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]); |
14584 | return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]); |
14585 | } |
14586 | |
14587 | return getVectorFCmpIR(Pred, IsSignaling); |
14588 | } |
14589 | |
14590 | |
14591 | case X86::BI__builtin_ia32_cmpeqss: |
14592 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0); |
14593 | case X86::BI__builtin_ia32_cmpltss: |
14594 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1); |
14595 | case X86::BI__builtin_ia32_cmpless: |
14596 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2); |
14597 | case X86::BI__builtin_ia32_cmpunordss: |
14598 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3); |
14599 | case X86::BI__builtin_ia32_cmpneqss: |
14600 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4); |
14601 | case X86::BI__builtin_ia32_cmpnltss: |
14602 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5); |
14603 | case X86::BI__builtin_ia32_cmpnless: |
14604 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6); |
14605 | case X86::BI__builtin_ia32_cmpordss: |
14606 | return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7); |
14607 | case X86::BI__builtin_ia32_cmpeqsd: |
14608 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0); |
14609 | case X86::BI__builtin_ia32_cmpltsd: |
14610 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1); |
14611 | case X86::BI__builtin_ia32_cmplesd: |
14612 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2); |
14613 | case X86::BI__builtin_ia32_cmpunordsd: |
14614 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3); |
14615 | case X86::BI__builtin_ia32_cmpneqsd: |
14616 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4); |
14617 | case X86::BI__builtin_ia32_cmpnltsd: |
14618 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5); |
14619 | case X86::BI__builtin_ia32_cmpnlesd: |
14620 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6); |
14621 | case X86::BI__builtin_ia32_cmpordsd: |
14622 | return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7); |
14623 | |
14624 | |
14625 | case X86::BI__builtin_ia32_vcvtph2ps: |
14626 | case X86::BI__builtin_ia32_vcvtph2ps256: |
14627 | case X86::BI__builtin_ia32_vcvtph2ps_mask: |
14628 | case X86::BI__builtin_ia32_vcvtph2ps256_mask: |
14629 | case X86::BI__builtin_ia32_vcvtph2ps512_mask: { |
14630 | CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); |
14631 | return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType())); |
14632 | } |
14633 | |
14634 | |
14635 | case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { |
14636 | Ops[2] = getMaskVecValue( |
14637 | *this, Ops[2], |
14638 | cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements()); |
14639 | Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; |
14640 | return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); |
14641 | } |
14642 | case X86::BI__builtin_ia32_cvtsbf162ss_32: |
14643 | return EmitX86CvtBF16ToFloatExpr(*this, E, Ops); |
14644 | |
14645 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14646 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { |
14647 | Intrinsic::ID IID; |
14648 | switch (BuiltinID) { |
14649 | default: llvm_unreachable("Unsupported intrinsic!"); |
14650 | case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: |
14651 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; |
14652 | break; |
14653 | case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: |
14654 | IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512; |
14655 | break; |
14656 | } |
14657 | Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]); |
14658 | return EmitX86Select(*this, Ops[2], Res, Ops[1]); |
14659 | } |
14660 | |
14661 | case X86::BI__emul: |
14662 | case X86::BI__emulu: { |
14663 | llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64); |
14664 | bool isSigned = (BuiltinID == X86::BI__emul); |
14665 | Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned); |
14666 | Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned); |
14667 | return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned); |
14668 | } |
14669 | case X86::BI__mulh: |
14670 | case X86::BI__umulh: |
14671 | case X86::BI_mul128: |
14672 | case X86::BI_umul128: { |
14673 | llvm::Type *ResType = ConvertType(E->getType()); |
14674 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
14675 | |
14676 | bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128); |
14677 | Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned); |
14678 | Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned); |
14679 | |
14680 | Value *MulResult, *HigherBits; |
14681 | if (IsSigned) { |
14682 | MulResult = Builder.CreateNSWMul(LHS, RHS); |
14683 | HigherBits = Builder.CreateAShr(MulResult, 64); |
14684 | } else { |
14685 | MulResult = Builder.CreateNUWMul(LHS, RHS); |
14686 | HigherBits = Builder.CreateLShr(MulResult, 64); |
14687 | } |
14688 | HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned); |
14689 | |
14690 | if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh) |
14691 | return HigherBits; |
14692 | |
14693 | Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); |
14694 | Builder.CreateStore(HigherBits, HighBitsAddress); |
14695 | return Builder.CreateIntCast(MulResult, ResType, IsSigned); |
14696 | } |
14697 | |
14698 | case X86::BI__faststorefence: { |
14699 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
14700 | llvm::SyncScope::System); |
14701 | } |
14702 | case X86::BI__shiftleft128: |
14703 | case X86::BI__shiftright128: { |
14704 | llvm::Function *F = CGM.getIntrinsic( |
14705 | BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr, |
14706 | Int64Ty); |
14707 | |
14708 | |
14709 | |
14710 | std::swap(Ops[0], Ops[1]); |
14711 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
14712 | return Builder.CreateCall(F, Ops); |
14713 | } |
14714 | case X86::BI_ReadWriteBarrier: |
14715 | case X86::BI_ReadBarrier: |
14716 | case X86::BI_WriteBarrier: { |
14717 | return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, |
14718 | llvm::SyncScope::SingleThread); |
14719 | } |
14720 | |
14721 | case X86::BI_AddressOfReturnAddress: { |
14722 | Function *F = |
14723 | CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy); |
14724 | return Builder.CreateCall(F); |
14725 | } |
14726 | case X86::BI__stosb: { |
14727 | |
14728 | |
14729 | return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true); |
14730 | } |
14731 | case X86::BI__ud2: |
14732 | |
14733 | return EmitTrapCall(Intrinsic::trap); |
14734 | case X86::BI__int2c: { |
14735 | |
14736 | llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); |
14737 | llvm::InlineAsm *IA = |
14738 | llvm::InlineAsm::get(FTy, "int $$0x2c", "", true); |
14739 | llvm::AttributeList NoReturnAttr = llvm::AttributeList::get( |
14740 | getLLVMContext(), llvm::AttributeList::FunctionIndex, |
14741 | llvm::Attribute::NoReturn); |
14742 | llvm::CallInst *CI = Builder.CreateCall(IA); |
14743 | CI->setAttributes(NoReturnAttr); |
14744 | return CI; |
14745 | } |
14746 | case X86::BI__readfsbyte: |
14747 | case X86::BI__readfsword: |
14748 | case X86::BI__readfsdword: |
14749 | case X86::BI__readfsqword: { |
14750 | llvm::Type *IntTy = ConvertType(E->getType()); |
14751 | Value *Ptr = |
14752 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257)); |
14753 | LoadInst *Load = Builder.CreateAlignedLoad( |
14754 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
14755 | Load->setVolatile(true); |
14756 | return Load; |
14757 | } |
14758 | case X86::BI__readgsbyte: |
14759 | case X86::BI__readgsword: |
14760 | case X86::BI__readgsdword: |
14761 | case X86::BI__readgsqword: { |
14762 | llvm::Type *IntTy = ConvertType(E->getType()); |
14763 | Value *Ptr = |
14764 | Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256)); |
14765 | LoadInst *Load = Builder.CreateAlignedLoad( |
14766 | IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); |
14767 | Load->setVolatile(true); |
14768 | return Load; |
14769 | } |
14770 | case X86::BI__builtin_ia32_paddsb512: |
14771 | case X86::BI__builtin_ia32_paddsw512: |
14772 | case X86::BI__builtin_ia32_paddsb256: |
14773 | case X86::BI__builtin_ia32_paddsw256: |
14774 | case X86::BI__builtin_ia32_paddsb128: |
14775 | case X86::BI__builtin_ia32_paddsw128: |
14776 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat); |
14777 | case X86::BI__builtin_ia32_paddusb512: |
14778 | case X86::BI__builtin_ia32_paddusw512: |
14779 | case X86::BI__builtin_ia32_paddusb256: |
14780 | case X86::BI__builtin_ia32_paddusw256: |
14781 | case X86::BI__builtin_ia32_paddusb128: |
14782 | case X86::BI__builtin_ia32_paddusw128: |
14783 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat); |
14784 | case X86::BI__builtin_ia32_psubsb512: |
14785 | case X86::BI__builtin_ia32_psubsw512: |
14786 | case X86::BI__builtin_ia32_psubsb256: |
14787 | case X86::BI__builtin_ia32_psubsw256: |
14788 | case X86::BI__builtin_ia32_psubsb128: |
14789 | case X86::BI__builtin_ia32_psubsw128: |
14790 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat); |
14791 | case X86::BI__builtin_ia32_psubusb512: |
14792 | case X86::BI__builtin_ia32_psubusw512: |
14793 | case X86::BI__builtin_ia32_psubusb256: |
14794 | case X86::BI__builtin_ia32_psubusw256: |
14795 | case X86::BI__builtin_ia32_psubusb128: |
14796 | case X86::BI__builtin_ia32_psubusw128: |
14797 | return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat); |
14798 | case X86::BI__builtin_ia32_encodekey128_u32: { |
14799 | Intrinsic::ID IID = Intrinsic::x86_encodekey128; |
14800 | |
14801 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]}); |
14802 | |
14803 | for (int i = 0; i < 6; ++i) { |
14804 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
14805 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16); |
14806 | Ptr = Builder.CreateBitCast( |
14807 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
14808 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
14809 | } |
14810 | |
14811 | return Builder.CreateExtractValue(Call, 0); |
14812 | } |
14813 | case X86::BI__builtin_ia32_encodekey256_u32: { |
14814 | Intrinsic::ID IID = Intrinsic::x86_encodekey256; |
14815 | |
14816 | Value *Call = |
14817 | Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]}); |
14818 | |
14819 | for (int i = 0; i < 7; ++i) { |
14820 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
14821 | Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16); |
14822 | Ptr = Builder.CreateBitCast( |
14823 | Ptr, llvm::PointerType::getUnqual(Extract->getType())); |
14824 | Builder.CreateAlignedStore(Extract, Ptr, Align(1)); |
14825 | } |
14826 | |
14827 | return Builder.CreateExtractValue(Call, 0); |
14828 | } |
14829 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
14830 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
14831 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
14832 | case X86::BI__builtin_ia32_aesdec256kl_u8: { |
14833 | Intrinsic::ID IID; |
14834 | StringRef BlockName; |
14835 | switch (BuiltinID) { |
14836 | default: |
14837 | llvm_unreachable("Unexpected builtin"); |
14838 | case X86::BI__builtin_ia32_aesenc128kl_u8: |
14839 | IID = Intrinsic::x86_aesenc128kl; |
14840 | BlockName = "aesenc128kl"; |
14841 | break; |
14842 | case X86::BI__builtin_ia32_aesdec128kl_u8: |
14843 | IID = Intrinsic::x86_aesdec128kl; |
14844 | BlockName = "aesdec128kl"; |
14845 | break; |
14846 | case X86::BI__builtin_ia32_aesenc256kl_u8: |
14847 | IID = Intrinsic::x86_aesenc256kl; |
14848 | BlockName = "aesenc256kl"; |
14849 | break; |
14850 | case X86::BI__builtin_ia32_aesdec256kl_u8: |
14851 | IID = Intrinsic::x86_aesdec256kl; |
14852 | BlockName = "aesdec256kl"; |
14853 | break; |
14854 | } |
14855 | |
14856 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]}); |
14857 | |
14858 | BasicBlock *NoError = |
14859 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
14860 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
14861 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
14862 | |
14863 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
14864 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
14865 | Value *Out = Builder.CreateExtractValue(Call, 1); |
14866 | Builder.CreateCondBr(Succ, NoError, Error); |
14867 | |
14868 | Builder.SetInsertPoint(NoError); |
14869 | Builder.CreateDefaultAlignedStore(Out, Ops[0]); |
14870 | Builder.CreateBr(End); |
14871 | |
14872 | Builder.SetInsertPoint(Error); |
14873 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
14874 | Builder.CreateDefaultAlignedStore(Zero, Ops[0]); |
14875 | Builder.CreateBr(End); |
14876 | |
14877 | Builder.SetInsertPoint(End); |
14878 | return Builder.CreateExtractValue(Call, 0); |
14879 | } |
14880 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
14881 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
14882 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
14883 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: { |
14884 | Intrinsic::ID IID; |
14885 | StringRef BlockName; |
14886 | switch (BuiltinID) { |
14887 | case X86::BI__builtin_ia32_aesencwide128kl_u8: |
14888 | IID = Intrinsic::x86_aesencwide128kl; |
14889 | BlockName = "aesencwide128kl"; |
14890 | break; |
14891 | case X86::BI__builtin_ia32_aesdecwide128kl_u8: |
14892 | IID = Intrinsic::x86_aesdecwide128kl; |
14893 | BlockName = "aesdecwide128kl"; |
14894 | break; |
14895 | case X86::BI__builtin_ia32_aesencwide256kl_u8: |
14896 | IID = Intrinsic::x86_aesencwide256kl; |
14897 | BlockName = "aesencwide256kl"; |
14898 | break; |
14899 | case X86::BI__builtin_ia32_aesdecwide256kl_u8: |
14900 | IID = Intrinsic::x86_aesdecwide256kl; |
14901 | BlockName = "aesdecwide256kl"; |
14902 | break; |
14903 | } |
14904 | |
14905 | llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2); |
14906 | Value *InOps[9]; |
14907 | InOps[0] = Ops[2]; |
14908 | for (int i = 0; i != 8; ++i) { |
14909 | Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i); |
14910 | InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16)); |
14911 | } |
14912 | |
14913 | Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps); |
14914 | |
14915 | BasicBlock *NoError = |
14916 | createBasicBlock(BlockName + "_no_error", this->CurFn); |
14917 | BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); |
14918 | BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); |
14919 | |
14920 | Value *Ret = Builder.CreateExtractValue(Call, 0); |
14921 | Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty()); |
14922 | Builder.CreateCondBr(Succ, NoError, Error); |
14923 | |
14924 | Builder.SetInsertPoint(NoError); |
14925 | for (int i = 0; i != 8; ++i) { |
14926 | Value *Extract = Builder.CreateExtractValue(Call, i + 1); |
14927 | Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i); |
14928 | Builder.CreateAlignedStore(Extract, Ptr, Align(16)); |
14929 | } |
14930 | Builder.CreateBr(End); |
14931 | |
14932 | Builder.SetInsertPoint(Error); |
14933 | for (int i = 0; i != 8; ++i) { |
14934 | Value *Out = Builder.CreateExtractValue(Call, i + 1); |
14935 | Constant *Zero = llvm::Constant::getNullValue(Out->getType()); |
14936 | Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i); |
14937 | Builder.CreateAlignedStore(Zero, Ptr, Align(16)); |
14938 | } |
14939 | Builder.CreateBr(End); |
14940 | |
14941 | Builder.SetInsertPoint(End); |
14942 | return Builder.CreateExtractValue(Call, 0); |
14943 | } |
14944 | } |
14945 | } |
14946 | |
14947 | Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, |
14948 | const CallExpr *E) { |
14949 | SmallVector<Value*, 4> Ops; |
14950 | |
14951 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
| 1 | Assuming 'i' is equal to 'e' | |
|
| 2 | | Loop condition is false. Execution continues on line 14954 | |
|
14952 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
14953 | |
14954 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
14955 | |
14956 | switch (BuiltinID) { |
| 3 | | Control jumps to 'case BI__builtin_altivec_vec_replace_unaligned:' at line 15268 | |
|
14957 | default: return nullptr; |
14958 | |
14959 | |
14960 | |
14961 | case PPC::BI__builtin_ppc_get_timebase: |
14962 | return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter)); |
14963 | |
14964 | |
14965 | case PPC::BI__builtin_altivec_lvx: |
14966 | case PPC::BI__builtin_altivec_lvxl: |
14967 | case PPC::BI__builtin_altivec_lvebx: |
14968 | case PPC::BI__builtin_altivec_lvehx: |
14969 | case PPC::BI__builtin_altivec_lvewx: |
14970 | case PPC::BI__builtin_altivec_lvsl: |
14971 | case PPC::BI__builtin_altivec_lvsr: |
14972 | case PPC::BI__builtin_vsx_lxvd2x: |
14973 | case PPC::BI__builtin_vsx_lxvw4x: |
14974 | case PPC::BI__builtin_vsx_lxvd2x_be: |
14975 | case PPC::BI__builtin_vsx_lxvw4x_be: |
14976 | case PPC::BI__builtin_vsx_lxvl: |
14977 | case PPC::BI__builtin_vsx_lxvll: |
14978 | { |
14979 | if(BuiltinID == PPC::BI__builtin_vsx_lxvl || |
14980 | BuiltinID == PPC::BI__builtin_vsx_lxvll){ |
14981 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); |
14982 | }else { |
14983 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
14984 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
14985 | Ops.pop_back(); |
14986 | } |
14987 | |
14988 | switch (BuiltinID) { |
14989 | default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); |
14990 | case PPC::BI__builtin_altivec_lvx: |
14991 | ID = Intrinsic::ppc_altivec_lvx; |
14992 | break; |
14993 | case PPC::BI__builtin_altivec_lvxl: |
14994 | ID = Intrinsic::ppc_altivec_lvxl; |
14995 | break; |
14996 | case PPC::BI__builtin_altivec_lvebx: |
14997 | ID = Intrinsic::ppc_altivec_lvebx; |
14998 | break; |
14999 | case PPC::BI__builtin_altivec_lvehx: |
15000 | ID = Intrinsic::ppc_altivec_lvehx; |
15001 | break; |
15002 | case PPC::BI__builtin_altivec_lvewx: |
15003 | ID = Intrinsic::ppc_altivec_lvewx; |
15004 | break; |
15005 | case PPC::BI__builtin_altivec_lvsl: |
15006 | ID = Intrinsic::ppc_altivec_lvsl; |
15007 | break; |
15008 | case PPC::BI__builtin_altivec_lvsr: |
15009 | ID = Intrinsic::ppc_altivec_lvsr; |
15010 | break; |
15011 | case PPC::BI__builtin_vsx_lxvd2x: |
15012 | ID = Intrinsic::ppc_vsx_lxvd2x; |
15013 | break; |
15014 | case PPC::BI__builtin_vsx_lxvw4x: |
15015 | ID = Intrinsic::ppc_vsx_lxvw4x; |
15016 | break; |
15017 | case PPC::BI__builtin_vsx_lxvd2x_be: |
15018 | ID = Intrinsic::ppc_vsx_lxvd2x_be; |
15019 | break; |
15020 | case PPC::BI__builtin_vsx_lxvw4x_be: |
15021 | ID = Intrinsic::ppc_vsx_lxvw4x_be; |
15022 | break; |
15023 | case PPC::BI__builtin_vsx_lxvl: |
15024 | ID = Intrinsic::ppc_vsx_lxvl; |
15025 | break; |
15026 | case PPC::BI__builtin_vsx_lxvll: |
15027 | ID = Intrinsic::ppc_vsx_lxvll; |
15028 | break; |
15029 | } |
15030 | llvm::Function *F = CGM.getIntrinsic(ID); |
15031 | return Builder.CreateCall(F, Ops, ""); |
15032 | } |
15033 | |
15034 | |
15035 | case PPC::BI__builtin_altivec_stvx: |
15036 | case PPC::BI__builtin_altivec_stvxl: |
15037 | case PPC::BI__builtin_altivec_stvebx: |
15038 | case PPC::BI__builtin_altivec_stvehx: |
15039 | case PPC::BI__builtin_altivec_stvewx: |
15040 | case PPC::BI__builtin_vsx_stxvd2x: |
15041 | case PPC::BI__builtin_vsx_stxvw4x: |
15042 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15043 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15044 | case PPC::BI__builtin_vsx_stxvl: |
15045 | case PPC::BI__builtin_vsx_stxvll: |
15046 | { |
15047 | if(BuiltinID == PPC::BI__builtin_vsx_stxvl || |
15048 | BuiltinID == PPC::BI__builtin_vsx_stxvll ){ |
15049 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15050 | }else { |
15051 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
15052 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
15053 | Ops.pop_back(); |
15054 | } |
15055 | |
15056 | switch (BuiltinID) { |
15057 | default: llvm_unreachable("Unsupported st intrinsic!"); |
15058 | case PPC::BI__builtin_altivec_stvx: |
15059 | ID = Intrinsic::ppc_altivec_stvx; |
15060 | break; |
15061 | case PPC::BI__builtin_altivec_stvxl: |
15062 | ID = Intrinsic::ppc_altivec_stvxl; |
15063 | break; |
15064 | case PPC::BI__builtin_altivec_stvebx: |
15065 | ID = Intrinsic::ppc_altivec_stvebx; |
15066 | break; |
15067 | case PPC::BI__builtin_altivec_stvehx: |
15068 | ID = Intrinsic::ppc_altivec_stvehx; |
15069 | break; |
15070 | case PPC::BI__builtin_altivec_stvewx: |
15071 | ID = Intrinsic::ppc_altivec_stvewx; |
15072 | break; |
15073 | case PPC::BI__builtin_vsx_stxvd2x: |
15074 | ID = Intrinsic::ppc_vsx_stxvd2x; |
15075 | break; |
15076 | case PPC::BI__builtin_vsx_stxvw4x: |
15077 | ID = Intrinsic::ppc_vsx_stxvw4x; |
15078 | break; |
15079 | case PPC::BI__builtin_vsx_stxvd2x_be: |
15080 | ID = Intrinsic::ppc_vsx_stxvd2x_be; |
15081 | break; |
15082 | case PPC::BI__builtin_vsx_stxvw4x_be: |
15083 | ID = Intrinsic::ppc_vsx_stxvw4x_be; |
15084 | break; |
15085 | case PPC::BI__builtin_vsx_stxvl: |
15086 | ID = Intrinsic::ppc_vsx_stxvl; |
15087 | break; |
15088 | case PPC::BI__builtin_vsx_stxvll: |
15089 | ID = Intrinsic::ppc_vsx_stxvll; |
15090 | break; |
15091 | } |
15092 | llvm::Function *F = CGM.getIntrinsic(ID); |
15093 | return Builder.CreateCall(F, Ops, ""); |
15094 | } |
15095 | case PPC::BI__builtin_vsx_ldrmb: { |
15096 | |
15097 | |
15098 | |
15099 | int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue(); |
15100 | llvm::Type *ResTy = ConvertType(E->getType()); |
15101 | bool IsLE = getTarget().isLittleEndian(); |
15102 | |
15103 | |
15104 | if (NumBytes == 16) { |
15105 | Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo()); |
15106 | Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1))); |
15107 | if (!IsLE) |
15108 | return LD; |
15109 | |
15110 | |
15111 | SmallVector<int, 16> RevMask; |
15112 | for (int Idx = 0; Idx < 16; Idx++) |
15113 | RevMask.push_back(15 - Idx); |
15114 | return Builder.CreateShuffleVector(LD, LD, RevMask); |
15115 | } |
15116 | |
15117 | llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx); |
15118 | llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr |
15119 | : Intrinsic::ppc_altivec_lvsl); |
15120 | llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm); |
15121 | Value *HiMem = Builder.CreateGEP( |
15122 | Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1)); |
15123 | Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo"); |
15124 | Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi"); |
15125 | Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1"); |
15126 | |
15127 | Ops.clear(); |
15128 | Ops.push_back(IsLE ? HiLd : LoLd); |
15129 | Ops.push_back(IsLE ? LoLd : HiLd); |
15130 | Ops.push_back(Mask1); |
15131 | Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1"); |
15132 | Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType()); |
15133 | |
15134 | if (IsLE) { |
15135 | SmallVector<int, 16> Consts; |
15136 | for (int Idx = 0; Idx < 16; Idx++) { |
15137 | int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1) |
15138 | : 16 - (NumBytes - Idx); |
15139 | Consts.push_back(Val); |
15140 | } |
15141 | return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy), |
15142 | Zero, Consts); |
15143 | } |
15144 | SmallVector<Constant *, 16> Consts; |
15145 | for (int Idx = 0; Idx < 16; Idx++) |
15146 | Consts.push_back(Builder.getInt8(NumBytes + Idx)); |
15147 | Value *Mask2 = ConstantVector::get(Consts); |
15148 | return Builder.CreateBitCast( |
15149 | Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy); |
15150 | } |
15151 | case PPC::BI__builtin_vsx_strmb: { |
15152 | int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue(); |
15153 | bool IsLE = getTarget().isLittleEndian(); |
15154 | auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) { |
15155 | |
15156 | |
15157 | if (Width == 16) { |
15158 | Value *BC = |
15159 | Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo()); |
15160 | Value *StVec = Ops[2]; |
15161 | if (IsLE) { |
15162 | SmallVector<int, 16> RevMask; |
15163 | for (int Idx = 0; Idx < 16; Idx++) |
15164 | RevMask.push_back(15 - Idx); |
15165 | StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask); |
15166 | } |
15167 | return Builder.CreateStore(StVec, |
15168 | Address(BC, CharUnits::fromQuantity(1))); |
15169 | } |
15170 | auto *ConvTy = Int64Ty; |
15171 | unsigned NumElts = 0; |
15172 | switch (Width) { |
15173 | default: |
15174 | llvm_unreachable("width for stores must be a power of 2"); |
15175 | case 8: |
15176 | ConvTy = Int64Ty; |
15177 | NumElts = 2; |
15178 | break; |
15179 | case 4: |
15180 | ConvTy = Int32Ty; |
15181 | NumElts = 4; |
15182 | break; |
15183 | case 2: |
15184 | ConvTy = Int16Ty; |
15185 | NumElts = 8; |
15186 | break; |
15187 | case 1: |
15188 | ConvTy = Int8Ty; |
15189 | NumElts = 16; |
15190 | break; |
15191 | } |
15192 | Value *Vec = Builder.CreateBitCast( |
15193 | Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts)); |
15194 | Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0], |
15195 | ConstantInt::get(Int64Ty, Offset)); |
15196 | Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo()); |
15197 | Value *Elt = Builder.CreateExtractElement(Vec, EltNo); |
15198 | if (IsLE && Width > 1) { |
15199 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy); |
15200 | Elt = Builder.CreateCall(F, Elt); |
15201 | } |
15202 | return Builder.CreateStore(Elt, |
15203 | Address(PtrBC, CharUnits::fromQuantity(1))); |
15204 | }; |
15205 | unsigned Stored = 0; |
15206 | unsigned RemainingBytes = NumBytes; |
15207 | Value *Result; |
15208 | if (NumBytes == 16) |
15209 | return StoreSubVec(16, 0, 0); |
15210 | if (NumBytes >= 8) { |
15211 | Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1); |
15212 | RemainingBytes -= 8; |
15213 | Stored += 8; |
15214 | } |
15215 | if (RemainingBytes >= 4) { |
15216 | Result = StoreSubVec(4, NumBytes - Stored - 4, |
15217 | IsLE ? (Stored >> 2) : 3 - (Stored >> 2)); |
15218 | RemainingBytes -= 4; |
15219 | Stored += 4; |
15220 | } |
15221 | if (RemainingBytes >= 2) { |
15222 | Result = StoreSubVec(2, NumBytes - Stored - 2, |
15223 | IsLE ? (Stored >> 1) : 7 - (Stored >> 1)); |
15224 | RemainingBytes -= 2; |
15225 | Stored += 2; |
15226 | } |
15227 | if (RemainingBytes) |
15228 | Result = |
15229 | StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored); |
15230 | return Result; |
15231 | } |
15232 | |
15233 | case PPC::BI__builtin_vsx_xvsqrtsp: |
15234 | case PPC::BI__builtin_vsx_xvsqrtdp: { |
15235 | llvm::Type *ResultType = ConvertType(E->getType()); |
15236 | Value *X = EmitScalarExpr(E->getArg(0)); |
15237 | if (Builder.getIsFPConstrained()) { |
15238 | llvm::Function *F = CGM.getIntrinsic( |
15239 | Intrinsic::experimental_constrained_sqrt, ResultType); |
15240 | return Builder.CreateConstrainedFPCall(F, X); |
15241 | } else { |
15242 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15243 | return Builder.CreateCall(F, X); |
15244 | } |
15245 | } |
15246 | |
15247 | case PPC::BI__builtin_altivec_vclzb: |
15248 | case PPC::BI__builtin_altivec_vclzh: |
15249 | case PPC::BI__builtin_altivec_vclzw: |
15250 | case PPC::BI__builtin_altivec_vclzd: { |
15251 | llvm::Type *ResultType = ConvertType(E->getType()); |
15252 | Value *X = EmitScalarExpr(E->getArg(0)); |
15253 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
15254 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
15255 | return Builder.CreateCall(F, {X, Undef}); |
15256 | } |
15257 | case PPC::BI__builtin_altivec_vctzb: |
15258 | case PPC::BI__builtin_altivec_vctzh: |
15259 | case PPC::BI__builtin_altivec_vctzw: |
15260 | case PPC::BI__builtin_altivec_vctzd: { |
15261 | llvm::Type *ResultType = ConvertType(E->getType()); |
15262 | Value *X = EmitScalarExpr(E->getArg(0)); |
15263 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
15264 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
15265 | return Builder.CreateCall(F, {X, Undef}); |
15266 | } |
15267 | case PPC::BI__builtin_altivec_vec_replace_elt: |
15268 | case PPC::BI__builtin_altivec_vec_replace_unaligned: { |
15269 | |
15270 | |
15271 | |
15272 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
| 4 | | Assuming the object is not a 'ConstantInt' | |
|
| 5 | | 'ArgCI' initialized to a null pointer value | |
|
15273 | assert(ArgCI && |
15274 | "Third Arg to vinsw/vinsd intrinsic must be a constant integer!"); |
15275 | llvm::Type *ResultType = ConvertType(E->getType()); |
15276 | llvm::Function *F = nullptr; |
15277 | Value *Call = nullptr; |
15278 | int64_t ConstArg = ArgCI->getSExtValue(); |
| 6 | | Called C++ object pointer is null |
|
15279 | unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits(); |
15280 | bool Is32Bit = false; |
15281 | assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width"); |
15282 | |
15283 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt) |
15284 | ConstArg *= ArgWidth / 8; |
15285 | if (ArgWidth == 32) { |
15286 | Is32Bit = true; |
15287 | |
15288 | |
15289 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw); |
15290 | |
15291 | if (getTarget().isLittleEndian()) |
15292 | ConstArg = 12 - ConstArg; |
15293 | } else { |
15294 | |
15295 | |
15296 | F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd); |
15297 | |
15298 | if (getTarget().isLittleEndian()) |
15299 | ConstArg = 8 - ConstArg; |
15300 | } |
15301 | Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg); |
15302 | |
15303 | |
15304 | |
15305 | if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) { |
15306 | Ops[0] = Builder.CreateBitCast( |
15307 | Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4) |
15308 | : llvm::FixedVectorType::get(Int64Ty, 2)); |
15309 | Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty); |
15310 | } |
15311 | |
15312 | Call = Builder.CreateCall(F, Ops); |
15313 | |
15314 | if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15315 | !Ops[1]->getType()->isIntegerTy()) |
15316 | return Builder.CreateBitCast(Call, ResultType); |
15317 | else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt && |
15318 | Ops[1]->getType()->isIntegerTy()) |
15319 | return Call; |
15320 | else |
15321 | return Builder.CreateBitCast(Call, |
15322 | llvm::FixedVectorType::get(Int8Ty, 16)); |
15323 | } |
15324 | case PPC::BI__builtin_altivec_vpopcntb: |
15325 | case PPC::BI__builtin_altivec_vpopcnth: |
15326 | case PPC::BI__builtin_altivec_vpopcntw: |
15327 | case PPC::BI__builtin_altivec_vpopcntd: { |
15328 | llvm::Type *ResultType = ConvertType(E->getType()); |
15329 | Value *X = EmitScalarExpr(E->getArg(0)); |
15330 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
15331 | return Builder.CreateCall(F, X); |
15332 | } |
15333 | case PPC::BI__builtin_altivec_vadduqm: |
15334 | case PPC::BI__builtin_altivec_vsubuqm: { |
15335 | llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128); |
15336 | Ops[0] = |
15337 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1)); |
15338 | Ops[1] = |
15339 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1)); |
15340 | if (BuiltinID == PPC::BI__builtin_altivec_vadduqm) |
15341 | return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm"); |
15342 | else |
15343 | return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm"); |
15344 | } |
15345 | |
15346 | |
15347 | |
15348 | |
15349 | |
15350 | case PPC::BI__builtin_ppc_rldimi: |
15351 | case PPC::BI__builtin_ppc_rlwimi: { |
15352 | llvm::Type *Ty = Ops[0]->getType(); |
15353 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15354 | if (BuiltinID == PPC::BI__builtin_ppc_rldimi) |
15355 | Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); |
15356 | Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]}); |
15357 | Value *X = Builder.CreateAnd(Shift, Ops[3]); |
15358 | Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3])); |
15359 | return Builder.CreateOr(X, Y); |
15360 | } |
15361 | |
15362 | |
15363 | |
15364 | case PPC::BI__builtin_ppc_rlwnm: { |
15365 | llvm::Type *Ty = Ops[0]->getType(); |
15366 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15367 | Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]}); |
15368 | return Builder.CreateAnd(Shift, Ops[2]); |
15369 | } |
15370 | case PPC::BI__builtin_ppc_poppar4: |
15371 | case PPC::BI__builtin_ppc_poppar8: { |
15372 | llvm::Type *ArgType = Ops[0]->getType(); |
15373 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType); |
15374 | Value *Tmp = Builder.CreateCall(F, Ops[0]); |
15375 | |
15376 | llvm::Type *ResultType = ConvertType(E->getType()); |
15377 | Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); |
15378 | if (Result->getType() != ResultType) |
15379 | Result = Builder.CreateIntCast(Result, ResultType, true, |
15380 | "cast"); |
15381 | return Result; |
15382 | } |
15383 | case PPC::BI__builtin_ppc_cmpb: { |
15384 | if (getTarget().getTriple().isPPC64()) { |
15385 | Function *F = |
15386 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty}); |
15387 | return Builder.CreateCall(F, Ops, "cmpb"); |
15388 | } |
15389 | |
15390 | |
15391 | |
15392 | |
15393 | |
15394 | |
15395 | |
15396 | |
15397 | |
15398 | |
15399 | |
15400 | |
15401 | |
15402 | |
15403 | Function *F = |
15404 | CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty}); |
15405 | Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty); |
15406 | Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty); |
15407 | Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32); |
15408 | Value *ArgOneHi = |
15409 | Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty); |
15410 | Value *ArgTwoHi = |
15411 | Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty); |
15412 | Value *ResLo = Builder.CreateZExt( |
15413 | Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty); |
15414 | Value *ResHiShift = Builder.CreateZExt( |
15415 | Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty); |
15416 | Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt); |
15417 | return Builder.CreateOr(ResLo, ResHi); |
15418 | } |
15419 | |
15420 | case PPC::BI__builtin_vsx_xvcpsgnsp: |
15421 | case PPC::BI__builtin_vsx_xvcpsgndp: { |
15422 | llvm::Type *ResultType = ConvertType(E->getType()); |
15423 | Value *X = EmitScalarExpr(E->getArg(0)); |
15424 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15425 | ID = Intrinsic::copysign; |
15426 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15427 | return Builder.CreateCall(F, {X, Y}); |
15428 | } |
15429 | |
15430 | case PPC::BI__builtin_vsx_xvrspip: |
15431 | case PPC::BI__builtin_vsx_xvrdpip: |
15432 | case PPC::BI__builtin_vsx_xvrdpim: |
15433 | case PPC::BI__builtin_vsx_xvrspim: |
15434 | case PPC::BI__builtin_vsx_xvrdpi: |
15435 | case PPC::BI__builtin_vsx_xvrspi: |
15436 | case PPC::BI__builtin_vsx_xvrdpic: |
15437 | case PPC::BI__builtin_vsx_xvrspic: |
15438 | case PPC::BI__builtin_vsx_xvrdpiz: |
15439 | case PPC::BI__builtin_vsx_xvrspiz: { |
15440 | llvm::Type *ResultType = ConvertType(E->getType()); |
15441 | Value *X = EmitScalarExpr(E->getArg(0)); |
15442 | if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim || |
15443 | BuiltinID == PPC::BI__builtin_vsx_xvrspim) |
15444 | ID = Builder.getIsFPConstrained() |
15445 | ? Intrinsic::experimental_constrained_floor |
15446 | : Intrinsic::floor; |
15447 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi || |
15448 | BuiltinID == PPC::BI__builtin_vsx_xvrspi) |
15449 | ID = Builder.getIsFPConstrained() |
15450 | ? Intrinsic::experimental_constrained_round |
15451 | : Intrinsic::round; |
15452 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic || |
15453 | BuiltinID == PPC::BI__builtin_vsx_xvrspic) |
15454 | ID = Builder.getIsFPConstrained() |
15455 | ? Intrinsic::experimental_constrained_rint |
15456 | : Intrinsic::rint; |
15457 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip || |
15458 | BuiltinID == PPC::BI__builtin_vsx_xvrspip) |
15459 | ID = Builder.getIsFPConstrained() |
15460 | ? Intrinsic::experimental_constrained_ceil |
15461 | : Intrinsic::ceil; |
15462 | else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz || |
15463 | BuiltinID == PPC::BI__builtin_vsx_xvrspiz) |
15464 | ID = Builder.getIsFPConstrained() |
15465 | ? Intrinsic::experimental_constrained_trunc |
15466 | : Intrinsic::trunc; |
15467 | llvm::Function *F = CGM.getIntrinsic(ID, ResultType); |
15468 | return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X) |
15469 | : Builder.CreateCall(F, X); |
15470 | } |
15471 | |
15472 | |
15473 | case PPC::BI__builtin_vsx_xvabsdp: |
15474 | case PPC::BI__builtin_vsx_xvabssp: { |
15475 | llvm::Type *ResultType = ConvertType(E->getType()); |
15476 | Value *X = EmitScalarExpr(E->getArg(0)); |
15477 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
15478 | return Builder.CreateCall(F, X); |
15479 | } |
15480 | |
15481 | |
15482 | case PPC::BI__builtin_ppc_recipdivf: |
15483 | case PPC::BI__builtin_ppc_recipdivd: |
15484 | case PPC::BI__builtin_ppc_rsqrtf: |
15485 | case PPC::BI__builtin_ppc_rsqrtd: { |
15486 | FastMathFlags FMF = Builder.getFastMathFlags(); |
15487 | Builder.getFastMathFlags().setFast(); |
15488 | llvm::Type *ResultType = ConvertType(E->getType()); |
15489 | Value *X = EmitScalarExpr(E->getArg(0)); |
15490 | |
15491 | if (BuiltinID == PPC::BI__builtin_ppc_recipdivf || |
15492 | BuiltinID == PPC::BI__builtin_ppc_recipdivd) { |
15493 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15494 | Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv"); |
15495 | Builder.getFastMathFlags() &= (FMF); |
15496 | return FDiv; |
15497 | } |
15498 | auto *One = ConstantFP::get(ResultType, 1.0); |
15499 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
15500 | Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt"); |
15501 | Builder.getFastMathFlags() &= (FMF); |
15502 | return FDiv; |
15503 | } |
15504 | case PPC::BI__builtin_ppc_alignx: { |
15505 | ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]); |
15506 | if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) |
15507 | AlignmentCI = ConstantInt::get(AlignmentCI->getType(), |
15508 | llvm::Value::MaximumAlignment); |
15509 | |
15510 | emitAlignmentAssumption(Ops[1], E->getArg(1), |
15511 | SourceLocation(), |
15512 | AlignmentCI, nullptr); |
15513 | return Ops[1]; |
15514 | } |
15515 | case PPC::BI__builtin_ppc_rdlam: { |
15516 | llvm::Type *Ty = Ops[0]->getType(); |
15517 | Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false); |
15518 | Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); |
15519 | Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt}); |
15520 | return Builder.CreateAnd(Rotate, Ops[2]); |
15521 | } |
15522 | |
15523 | case PPC::BI__builtin_vsx_xvmaddadp: |
15524 | case PPC::BI__builtin_vsx_xvmaddasp: |
15525 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15526 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15527 | case PPC::BI__builtin_vsx_xvmsubadp: |
15528 | case PPC::BI__builtin_vsx_xvmsubasp: |
15529 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15530 | case PPC::BI__builtin_vsx_xvnmsubasp: { |
15531 | llvm::Type *ResultType = ConvertType(E->getType()); |
15532 | Value *X = EmitScalarExpr(E->getArg(0)); |
15533 | Value *Y = EmitScalarExpr(E->getArg(1)); |
15534 | Value *Z = EmitScalarExpr(E->getArg(2)); |
15535 | llvm::Function *F; |
15536 | if (Builder.getIsFPConstrained()) |
15537 | F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
15538 | else |
15539 | F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
15540 | switch (BuiltinID) { |
15541 | case PPC::BI__builtin_vsx_xvmaddadp: |
15542 | case PPC::BI__builtin_vsx_xvmaddasp: |
15543 | if (Builder.getIsFPConstrained()) |
15544 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
15545 | else |
15546 | return Builder.CreateCall(F, {X, Y, Z}); |
15547 | case PPC::BI__builtin_vsx_xvnmaddadp: |
15548 | case PPC::BI__builtin_vsx_xvnmaddasp: |
15549 | if (Builder.getIsFPConstrained()) |
15550 | return Builder.CreateFNeg( |
15551 | Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
15552 | else |
15553 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
15554 | case PPC::BI__builtin_vsx_xvmsubadp: |
15555 | case PPC::BI__builtin_vsx_xvmsubasp: |
15556 | if (Builder.getIsFPConstrained()) |
15557 | return Builder.CreateConstrainedFPCall( |
15558 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15559 | else |
15560 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
15561 | case PPC::BI__builtin_vsx_xvnmsubadp: |
15562 | case PPC::BI__builtin_vsx_xvnmsubasp: |
15563 | if (Builder.getIsFPConstrained()) |
15564 | return Builder.CreateFNeg( |
15565 | Builder.CreateConstrainedFPCall( |
15566 | F, {X, Y, Builder.CreateFNeg(Z, "neg")}), |
15567 | "neg"); |
15568 | else |
15569 | return Builder.CreateFNeg( |
15570 | Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}), |
15571 | "neg"); |
15572 | } |
15573 | llvm_unreachable("Unknown FMA operation"); |
15574 | return nullptr; |
15575 | } |
15576 | |
15577 | case PPC::BI__builtin_vsx_insertword: { |
15578 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw); |
15579 | |
15580 | |
15581 | |
15582 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15583 | assert(ArgCI && |
15584 | "Third arg to xxinsertw intrinsic must be constant integer"); |
15585 | const int64_t MaxIndex = 12; |
15586 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
15587 | |
15588 | |
15589 | |
15590 | |
15591 | |
15592 | |
15593 | std::swap(Ops[0], Ops[1]); |
15594 | |
15595 | |
15596 | |
15597 | Ops[1] = |
15598 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2)); |
15599 | |
15600 | if (getTarget().isLittleEndian()) { |
15601 | |
15602 | Ops[0] = |
15603 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15604 | Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0}); |
15605 | |
15606 | |
15607 | Index = MaxIndex - Index; |
15608 | } |
15609 | |
15610 | |
15611 | Ops[0] = |
15612 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
15613 | Ops[2] = ConstantInt::getSigned(Int32Ty, Index); |
15614 | return Builder.CreateCall(F, Ops); |
15615 | } |
15616 | |
15617 | case PPC::BI__builtin_vsx_extractuword: { |
15618 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw); |
15619 | |
15620 | |
15621 | Ops[0] = |
15622 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15623 | |
15624 | |
15625 | |
15626 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]); |
15627 | assert(ArgCI && |
15628 | "Second Arg to xxextractuw intrinsic must be a constant integer!"); |
15629 | const int64_t MaxIndex = 12; |
15630 | int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex); |
15631 | |
15632 | if (getTarget().isLittleEndian()) { |
15633 | |
15634 | Index = MaxIndex - Index; |
15635 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
15636 | |
15637 | |
15638 | Value *Call = Builder.CreateCall(F, Ops); |
15639 | |
15640 | Value *ShuffleCall = |
15641 | Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0}); |
15642 | return ShuffleCall; |
15643 | } else { |
15644 | Ops[1] = ConstantInt::getSigned(Int32Ty, Index); |
15645 | return Builder.CreateCall(F, Ops); |
15646 | } |
15647 | } |
15648 | |
15649 | case PPC::BI__builtin_vsx_xxpermdi: { |
15650 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15651 | assert(ArgCI && "Third arg must be constant integer!"); |
15652 | |
15653 | unsigned Index = ArgCI->getZExtValue(); |
15654 | Ops[0] = |
15655 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2)); |
15656 | Ops[1] = |
15657 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2)); |
15658 | |
15659 | |
15660 | |
15661 | |
15662 | int ElemIdx0 = (Index & 2) >> 1; |
15663 | int ElemIdx1 = 2 + (Index & 1); |
15664 | |
15665 | int ShuffleElts[2] = {ElemIdx0, ElemIdx1}; |
15666 | Value *ShuffleCall = |
15667 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts); |
15668 | QualType BIRetType = E->getType(); |
15669 | auto RetTy = ConvertType(BIRetType); |
15670 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
15671 | } |
15672 | |
15673 | case PPC::BI__builtin_vsx_xxsldwi: { |
15674 | ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]); |
15675 | assert(ArgCI && "Third argument must be a compile time constant"); |
15676 | unsigned Index = ArgCI->getZExtValue() & 0x3; |
15677 | Ops[0] = |
15678 | Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4)); |
15679 | Ops[1] = |
15680 | Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4)); |
15681 | |
15682 | |
15683 | int ElemIdx0; |
15684 | int ElemIdx1; |
15685 | int ElemIdx2; |
15686 | int ElemIdx3; |
15687 | if (getTarget().isLittleEndian()) { |
15688 | |
15689 | |
15690 | |
15691 | ElemIdx0 = (8 - Index) % 8; |
15692 | ElemIdx1 = (9 - Index) % 8; |
15693 | ElemIdx2 = (10 - Index) % 8; |
15694 | ElemIdx3 = (11 - Index) % 8; |
15695 | } else { |
15696 | |
15697 | ElemIdx0 = Index; |
15698 | ElemIdx1 = Index + 1; |
15699 | ElemIdx2 = Index + 2; |
15700 | ElemIdx3 = Index + 3; |
15701 | } |
15702 | |
15703 | int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3}; |
15704 | Value *ShuffleCall = |
15705 | Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts); |
15706 | QualType BIRetType = E->getType(); |
15707 | auto RetTy = ConvertType(BIRetType); |
15708 | return Builder.CreateBitCast(ShuffleCall, RetTy); |
15709 | } |
15710 | |
15711 | case PPC::BI__builtin_pack_vector_int128: { |
15712 | bool isLittleEndian = getTarget().isLittleEndian(); |
15713 | Value *UndefValue = |
15714 | llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2)); |
15715 | Value *Res = Builder.CreateInsertElement( |
15716 | UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0)); |
15717 | Res = Builder.CreateInsertElement(Res, Ops[1], |
15718 | (uint64_t)(isLittleEndian ? 0 : 1)); |
15719 | return Builder.CreateBitCast(Res, ConvertType(E->getType())); |
15720 | } |
15721 | |
15722 | case PPC::BI__builtin_unpack_vector_int128: { |
15723 | ConstantInt *Index = cast<ConstantInt>(Ops[1]); |
15724 | Value *Unpacked = Builder.CreateBitCast( |
15725 | Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2)); |
15726 | |
15727 | if (getTarget().isLittleEndian()) |
15728 | Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue()); |
15729 | |
15730 | return Builder.CreateExtractElement(Unpacked, Index); |
15731 | } |
15732 | |
15733 | case PPC::BI__builtin_ppc_sthcx: { |
15734 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx); |
15735 | Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); |
15736 | Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty); |
15737 | return Builder.CreateCall(F, Ops); |
15738 | } |
15739 | |
15740 | |
15741 | |
15742 | |
15743 | |
15744 | |
15745 | |
15746 | #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \ |
15747 | case PPC::BI__builtin_##Name: |
15748 | #include "clang/Basic/BuiltinsPPC.def" |
15749 | { |
15750 | |
15751 | |
15752 | |
15753 | |
15754 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || |
15755 | BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || |
15756 | BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { |
15757 | unsigned NumVecs = 2; |
15758 | auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; |
15759 | if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { |
15760 | NumVecs = 4; |
15761 | Intrinsic = Intrinsic::ppc_mma_disassemble_acc; |
15762 | } |
15763 | llvm::Function *F = CGM.getIntrinsic(Intrinsic); |
15764 | Address Addr = EmitPointerWithAlignment(E->getArg(1)); |
15765 | Value *Vec = Builder.CreateLoad(Addr); |
15766 | Value *Call = Builder.CreateCall(F, {Vec}); |
15767 | llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16); |
15768 | Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo()); |
15769 | for (unsigned i=0; i<NumVecs; i++) { |
15770 | Value *Vec = Builder.CreateExtractValue(Call, i); |
15771 | llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i); |
15772 | Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index); |
15773 | Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16)); |
15774 | } |
15775 | return Call; |
15776 | } |
15777 | bool Accumulate; |
15778 | switch (BuiltinID) { |
15779 | #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ |
15780 | case PPC::BI__builtin_##Name: \ |
15781 | ID = Intrinsic::ppc_##Intr; \ |
15782 | Accumulate = Acc; \ |
15783 | break; |
15784 | #include "clang/Basic/BuiltinsPPC.def" |
15785 | } |
15786 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
15787 | BuiltinID == PPC::BI__builtin_vsx_stxvp || |
15788 | BuiltinID == PPC::BI__builtin_mma_lxvp || |
15789 | BuiltinID == PPC::BI__builtin_mma_stxvp) { |
15790 | if (BuiltinID == PPC::BI__builtin_vsx_lxvp || |
15791 | BuiltinID == PPC::BI__builtin_mma_lxvp) { |
15792 | Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); |
15793 | Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); |
15794 | } else { |
15795 | Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); |
15796 | Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); |
15797 | } |
15798 | Ops.pop_back(); |
15799 | llvm::Function *F = CGM.getIntrinsic(ID); |
15800 | return Builder.CreateCall(F, Ops, ""); |
15801 | } |
15802 | SmallVector<Value*, 4> CallOps; |
15803 | if (Accumulate) { |
15804 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
15805 | Value *Acc = Builder.CreateLoad(Addr); |
15806 | CallOps.push_back(Acc); |
15807 | } |
15808 | for (unsigned i=1; i<Ops.size(); i++) |
15809 | CallOps.push_back(Ops[i]); |
15810 | llvm::Function *F = CGM.getIntrinsic(ID); |
15811 | Value *Call = Builder.CreateCall(F, CallOps); |
15812 | return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64)); |
15813 | } |
15814 | |
15815 | case PPC::BI__builtin_ppc_compare_and_swap: |
15816 | case PPC::BI__builtin_ppc_compare_and_swaplp: { |
15817 | Address Addr = EmitPointerWithAlignment(E->getArg(0)); |
15818 | Address OldValAddr = EmitPointerWithAlignment(E->getArg(1)); |
15819 | Value *OldVal = Builder.CreateLoad(OldValAddr); |
15820 | QualType AtomicTy = E->getArg(0)->getType()->getPointeeType(); |
15821 | LValue LV = MakeAddrLValue(Addr, AtomicTy); |
15822 | auto Pair = EmitAtomicCompareExchange( |
15823 | LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(), |
15824 | llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true); |
15825 | |
15826 | |
15827 | |
15828 | |
15829 | |
15830 | |
15831 | |
15832 | Value *LoadedVal = Pair.first.getScalarVal(); |
15833 | Builder.CreateStore(LoadedVal, OldValAddr); |
15834 | return Pair.second; |
15835 | } |
15836 | case PPC::BI__builtin_ppc_fetch_and_add: |
15837 | case PPC::BI__builtin_ppc_fetch_and_addlp: { |
15838 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E, |
15839 | llvm::AtomicOrdering::Monotonic); |
15840 | } |
15841 | case PPC::BI__builtin_ppc_fetch_and_and: |
15842 | case PPC::BI__builtin_ppc_fetch_and_andlp: { |
15843 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E, |
15844 | llvm::AtomicOrdering::Monotonic); |
15845 | } |
15846 | |
15847 | case PPC::BI__builtin_ppc_fetch_and_or: |
15848 | case PPC::BI__builtin_ppc_fetch_and_orlp: { |
15849 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E, |
15850 | llvm::AtomicOrdering::Monotonic); |
15851 | } |
15852 | case PPC::BI__builtin_ppc_fetch_and_swap: |
15853 | case PPC::BI__builtin_ppc_fetch_and_swaplp: { |
15854 | return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E, |
15855 | llvm::AtomicOrdering::Monotonic); |
15856 | } |
15857 | case PPC::BI__builtin_ppc_ldarx: |
15858 | case PPC::BI__builtin_ppc_lwarx: |
15859 | case PPC::BI__builtin_ppc_lharx: |
15860 | case PPC::BI__builtin_ppc_lbarx: |
15861 | return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E); |
15862 | case PPC::BI__builtin_ppc_mfspr: { |
15863 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
15864 | ? Int32Ty |
15865 | : Int64Ty; |
15866 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType); |
15867 | return Builder.CreateCall(F, Ops); |
15868 | } |
15869 | case PPC::BI__builtin_ppc_mtspr: { |
15870 | llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32 |
15871 | ? Int32Ty |
15872 | : Int64Ty; |
15873 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType); |
15874 | return Builder.CreateCall(F, Ops); |
15875 | } |
15876 | case PPC::BI__builtin_ppc_popcntb: { |
15877 | Value *ArgValue = EmitScalarExpr(E->getArg(0)); |
15878 | llvm::Type *ArgType = ArgValue->getType(); |
15879 | Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType}); |
15880 | return Builder.CreateCall(F, Ops, "popcntb"); |
15881 | } |
15882 | case PPC::BI__builtin_ppc_mtfsf: { |
15883 | |
15884 | |
15885 | Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy); |
15886 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf); |
15887 | return Builder.CreateCall(F, {Ops[0], Cast}, ""); |
15888 | } |
15889 | |
15890 | case PPC::BI__builtin_ppc_swdiv_nochk: |
15891 | case PPC::BI__builtin_ppc_swdivs_nochk: { |
15892 | FastMathFlags FMF = Builder.getFastMathFlags(); |
15893 | Builder.getFastMathFlags().setFast(); |
15894 | Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk"); |
15895 | Builder.getFastMathFlags() &= (FMF); |
15896 | return FDiv; |
15897 | } |
15898 | case PPC::BI__builtin_ppc_fric: |
15899 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15900 | *this, E, Intrinsic::rint, |
15901 | Intrinsic::experimental_constrained_rint)) |
15902 | .getScalarVal(); |
15903 | case PPC::BI__builtin_ppc_frim: |
15904 | case PPC::BI__builtin_ppc_frims: |
15905 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15906 | *this, E, Intrinsic::floor, |
15907 | Intrinsic::experimental_constrained_floor)) |
15908 | .getScalarVal(); |
15909 | case PPC::BI__builtin_ppc_frin: |
15910 | case PPC::BI__builtin_ppc_frins: |
15911 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15912 | *this, E, Intrinsic::round, |
15913 | Intrinsic::experimental_constrained_round)) |
15914 | .getScalarVal(); |
15915 | case PPC::BI__builtin_ppc_frip: |
15916 | case PPC::BI__builtin_ppc_frips: |
15917 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15918 | *this, E, Intrinsic::ceil, |
15919 | Intrinsic::experimental_constrained_ceil)) |
15920 | .getScalarVal(); |
15921 | case PPC::BI__builtin_ppc_friz: |
15922 | case PPC::BI__builtin_ppc_frizs: |
15923 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15924 | *this, E, Intrinsic::trunc, |
15925 | Intrinsic::experimental_constrained_trunc)) |
15926 | .getScalarVal(); |
15927 | case PPC::BI__builtin_ppc_fsqrt: |
15928 | case PPC::BI__builtin_ppc_fsqrts: |
15929 | return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( |
15930 | *this, E, Intrinsic::sqrt, |
15931 | Intrinsic::experimental_constrained_sqrt)) |
15932 | .getScalarVal(); |
15933 | } |
15934 | } |
15935 | |
15936 | namespace { |
15937 | |
15938 | |
15939 | Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF, |
15940 | const CallExpr *E = nullptr) { |
15941 | auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr); |
15942 | auto *Call = CGF.Builder.CreateCall(F); |
15943 | Call->addAttribute( |
15944 | AttributeList::ReturnIndex, |
15945 | Attribute::getWithDereferenceableBytes(Call->getContext(), 64)); |
15946 | Call->addAttribute(AttributeList::ReturnIndex, |
15947 | Attribute::getWithAlignment(Call->getContext(), Align(4))); |
15948 | if (!E) |
15949 | return Call; |
15950 | QualType BuiltinRetType = E->getType(); |
15951 | auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType)); |
15952 | if (RetTy == Call->getType()) |
15953 | return Call; |
15954 | return CGF.Builder.CreateAddrSpaceCast(Call, RetTy); |
15955 | } |
15956 | |
15957 | |
15958 | Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) { |
15959 | const unsigned XOffset = 4; |
15960 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
15961 | |
15962 | auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2); |
15963 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
15964 | auto *DstTy = |
15965 | CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
15966 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
15967 | auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2))); |
15968 | llvm::MDBuilder MDHelper(CGF.getLLVMContext()); |
15969 | llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1), |
15970 | APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1)); |
15971 | LD->setMetadata(llvm::LLVMContext::MD_range, RNode); |
15972 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
15973 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
15974 | return LD; |
15975 | } |
15976 | |
15977 | |
15978 | Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) { |
15979 | const unsigned XOffset = 12; |
15980 | auto *DP = EmitAMDGPUDispatchPtr(CGF); |
15981 | |
15982 | auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4); |
15983 | auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset); |
15984 | auto *DstTy = |
15985 | CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace()); |
15986 | auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy); |
15987 | auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4))); |
15988 | LD->setMetadata(llvm::LLVMContext::MD_invariant_load, |
15989 | llvm::MDNode::get(CGF.getLLVMContext(), None)); |
15990 | return LD; |
15991 | } |
15992 | } |
15993 | |
15994 | |
15995 | |
15996 | |
15997 | |
15998 | |
15999 | |
16000 | bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope, |
16001 | llvm::AtomicOrdering &AO, |
16002 | llvm::SyncScope::ID &SSID) { |
16003 | if (isa<llvm::ConstantInt>(Order)) { |
16004 | int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); |
16005 | |
16006 | |
16007 | assert(llvm::isValidAtomicOrderingCABI(ord)); |
16008 | switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { |
16009 | case llvm::AtomicOrderingCABI::acquire: |
16010 | case llvm::AtomicOrderingCABI::consume: |
16011 | AO = llvm::AtomicOrdering::Acquire; |
16012 | break; |
16013 | case llvm::AtomicOrderingCABI::release: |
16014 | AO = llvm::AtomicOrdering::Release; |
16015 | break; |
16016 | case llvm::AtomicOrderingCABI::acq_rel: |
16017 | AO = llvm::AtomicOrdering::AcquireRelease; |
16018 | break; |
16019 | case llvm::AtomicOrderingCABI::seq_cst: |
16020 | AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16021 | break; |
16022 | case llvm::AtomicOrderingCABI::relaxed: |
16023 | AO = llvm::AtomicOrdering::Monotonic; |
16024 | break; |
16025 | } |
16026 | |
16027 | StringRef scp; |
16028 | llvm::getConstantStringInfo(Scope, scp); |
16029 | SSID = getLLVMContext().getOrInsertSyncScopeID(scp); |
16030 | return true; |
16031 | } |
16032 | return false; |
16033 | } |
16034 | |
16035 | Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, |
16036 | const CallExpr *E) { |
16037 | llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent; |
16038 | llvm::SyncScope::ID SSID; |
16039 | switch (BuiltinID) { |
16040 | case AMDGPU::BI__builtin_amdgcn_div_scale: |
16041 | case AMDGPU::BI__builtin_amdgcn_div_scalef: { |
16042 | |
16043 | |
16044 | |
16045 | Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); |
16046 | |
16047 | llvm::Value *X = EmitScalarExpr(E->getArg(0)); |
16048 | llvm::Value *Y = EmitScalarExpr(E->getArg(1)); |
16049 | llvm::Value *Z = EmitScalarExpr(E->getArg(2)); |
16050 | |
16051 | llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, |
16052 | X->getType()); |
16053 | |
16054 | llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); |
16055 | |
16056 | llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); |
16057 | llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); |
16058 | |
16059 | llvm::Type *RealFlagType |
16060 | = FlagOutPtr.getPointer()->getType()->getPointerElementType(); |
16061 | |
16062 | llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType); |
16063 | Builder.CreateStore(FlagExt, FlagOutPtr); |
16064 | return Result; |
16065 | } |
16066 | case AMDGPU::BI__builtin_amdgcn_div_fmas: |
16067 | case AMDGPU::BI__builtin_amdgcn_div_fmasf: { |
16068 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16069 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16070 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16071 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16072 | |
16073 | llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, |
16074 | Src0->getType()); |
16075 | llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); |
16076 | return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); |
16077 | } |
16078 | |
16079 | case AMDGPU::BI__builtin_amdgcn_ds_swizzle: |
16080 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle); |
16081 | case AMDGPU::BI__builtin_amdgcn_mov_dpp8: |
16082 | return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8); |
16083 | case AMDGPU::BI__builtin_amdgcn_mov_dpp: |
16084 | case AMDGPU::BI__builtin_amdgcn_update_dpp: { |
16085 | llvm::SmallVector<llvm::Value *, 6> Args; |
16086 | for (unsigned I = 0; I != E->getNumArgs(); ++I) |
16087 | Args.push_back(EmitScalarExpr(E->getArg(I))); |
16088 | assert(Args.size() == 5 || Args.size() == 6); |
16089 | if (Args.size() == 5) |
16090 | Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType())); |
16091 | Function *F = |
16092 | CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType()); |
16093 | return Builder.CreateCall(F, Args); |
16094 | } |
16095 | case AMDGPU::BI__builtin_amdgcn_div_fixup: |
16096 | case AMDGPU::BI__builtin_amdgcn_div_fixupf: |
16097 | case AMDGPU::BI__builtin_amdgcn_div_fixuph: |
16098 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup); |
16099 | case AMDGPU::BI__builtin_amdgcn_trig_preop: |
16100 | case AMDGPU::BI__builtin_amdgcn_trig_preopf: |
16101 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop); |
16102 | case AMDGPU::BI__builtin_amdgcn_rcp: |
16103 | case AMDGPU::BI__builtin_amdgcn_rcpf: |
16104 | case AMDGPU::BI__builtin_amdgcn_rcph: |
16105 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp); |
16106 | case AMDGPU::BI__builtin_amdgcn_sqrt: |
16107 | case AMDGPU::BI__builtin_amdgcn_sqrtf: |
16108 | case AMDGPU::BI__builtin_amdgcn_sqrth: |
16109 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt); |
16110 | case AMDGPU::BI__builtin_amdgcn_rsq: |
16111 | case AMDGPU::BI__builtin_amdgcn_rsqf: |
16112 | case AMDGPU::BI__builtin_amdgcn_rsqh: |
16113 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq); |
16114 | case AMDGPU::BI__builtin_amdgcn_rsq_clamp: |
16115 | case AMDGPU::BI__builtin_amdgcn_rsq_clampf: |
16116 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp); |
16117 | case AMDGPU::BI__builtin_amdgcn_sinf: |
16118 | case AMDGPU::BI__builtin_amdgcn_sinh: |
16119 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin); |
16120 | case AMDGPU::BI__builtin_amdgcn_cosf: |
16121 | case AMDGPU::BI__builtin_amdgcn_cosh: |
16122 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos); |
16123 | case AMDGPU::BI__builtin_amdgcn_dispatch_ptr: |
16124 | return EmitAMDGPUDispatchPtr(*this, E); |
16125 | case AMDGPU::BI__builtin_amdgcn_log_clampf: |
16126 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp); |
16127 | case AMDGPU::BI__builtin_amdgcn_ldexp: |
16128 | case AMDGPU::BI__builtin_amdgcn_ldexpf: |
16129 | case AMDGPU::BI__builtin_amdgcn_ldexph: |
16130 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp); |
16131 | case AMDGPU::BI__builtin_amdgcn_frexp_mant: |
16132 | case AMDGPU::BI__builtin_amdgcn_frexp_mantf: |
16133 | case AMDGPU::BI__builtin_amdgcn_frexp_manth: |
16134 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant); |
16135 | case AMDGPU::BI__builtin_amdgcn_frexp_exp: |
16136 | case AMDGPU::BI__builtin_amdgcn_frexp_expf: { |
16137 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16138 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16139 | { Builder.getInt32Ty(), Src0->getType() }); |
16140 | return Builder.CreateCall(F, Src0); |
16141 | } |
16142 | case AMDGPU::BI__builtin_amdgcn_frexp_exph: { |
16143 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16144 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, |
16145 | { Builder.getInt16Ty(), Src0->getType() }); |
16146 | return Builder.CreateCall(F, Src0); |
16147 | } |
16148 | case AMDGPU::BI__builtin_amdgcn_fract: |
16149 | case AMDGPU::BI__builtin_amdgcn_fractf: |
16150 | case AMDGPU::BI__builtin_amdgcn_fracth: |
16151 | return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract); |
16152 | case AMDGPU::BI__builtin_amdgcn_lerp: |
16153 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp); |
16154 | case AMDGPU::BI__builtin_amdgcn_ubfe: |
16155 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe); |
16156 | case AMDGPU::BI__builtin_amdgcn_sbfe: |
16157 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe); |
16158 | case AMDGPU::BI__builtin_amdgcn_uicmp: |
16159 | case AMDGPU::BI__builtin_amdgcn_uicmpl: |
16160 | case AMDGPU::BI__builtin_amdgcn_sicmp: |
16161 | case AMDGPU::BI__builtin_amdgcn_sicmpl: { |
16162 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16163 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16164 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16165 | |
16166 | |
16167 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp, |
16168 | { Builder.getInt64Ty(), Src0->getType() }); |
16169 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16170 | } |
16171 | case AMDGPU::BI__builtin_amdgcn_fcmp: |
16172 | case AMDGPU::BI__builtin_amdgcn_fcmpf: { |
16173 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16174 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16175 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16176 | |
16177 | |
16178 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp, |
16179 | { Builder.getInt64Ty(), Src0->getType() }); |
16180 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16181 | } |
16182 | case AMDGPU::BI__builtin_amdgcn_class: |
16183 | case AMDGPU::BI__builtin_amdgcn_classf: |
16184 | case AMDGPU::BI__builtin_amdgcn_classh: |
16185 | return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class); |
16186 | case AMDGPU::BI__builtin_amdgcn_fmed3f: |
16187 | case AMDGPU::BI__builtin_amdgcn_fmed3h: |
16188 | return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); |
16189 | case AMDGPU::BI__builtin_amdgcn_ds_append: |
16190 | case AMDGPU::BI__builtin_amdgcn_ds_consume: { |
16191 | Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? |
16192 | Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; |
16193 | Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16194 | Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); |
16195 | return Builder.CreateCall(F, { Src0, Builder.getFalse() }); |
16196 | } |
16197 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16198 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16199 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: { |
16200 | Intrinsic::ID Intrin; |
16201 | switch (BuiltinID) { |
16202 | case AMDGPU::BI__builtin_amdgcn_ds_faddf: |
16203 | Intrin = Intrinsic::amdgcn_ds_fadd; |
16204 | break; |
16205 | case AMDGPU::BI__builtin_amdgcn_ds_fminf: |
16206 | Intrin = Intrinsic::amdgcn_ds_fmin; |
16207 | break; |
16208 | case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: |
16209 | Intrin = Intrinsic::amdgcn_ds_fmax; |
16210 | break; |
16211 | } |
16212 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16213 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16214 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16215 | llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); |
16216 | llvm::Value *Src4 = EmitScalarExpr(E->getArg(4)); |
16217 | llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() }); |
16218 | llvm::FunctionType *FTy = F->getFunctionType(); |
16219 | llvm::Type *PTy = FTy->getParamType(0); |
16220 | Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy); |
16221 | return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 }); |
16222 | } |
16223 | case AMDGPU::BI__builtin_amdgcn_read_exec: { |
16224 | CallInst *CI = cast<CallInst>( |
16225 | EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec")); |
16226 | CI->setConvergent(); |
16227 | return CI; |
16228 | } |
16229 | case AMDGPU::BI__builtin_amdgcn_read_exec_lo: |
16230 | case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { |
16231 | StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? |
16232 | "exec_lo" : "exec_hi"; |
16233 | CallInst *CI = cast<CallInst>( |
16234 | EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName)); |
16235 | CI->setConvergent(); |
16236 | return CI; |
16237 | } |
16238 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray: |
16239 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h: |
16240 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l: |
16241 | case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: { |
16242 | llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0)); |
16243 | llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1)); |
16244 | llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2)); |
16245 | llvm::Value *RayDir = EmitScalarExpr(E->getArg(3)); |
16246 | llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4)); |
16247 | llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5)); |
16248 | |
16249 | Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray, |
16250 | {NodePtr->getType(), RayDir->getType()}); |
16251 | return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir, |
16252 | RayInverseDir, TextureDescr}); |
16253 | } |
16254 | |
16255 | |
16256 | case AMDGPU::BI__builtin_amdgcn_workitem_id_x: |
16257 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024); |
16258 | case AMDGPU::BI__builtin_amdgcn_workitem_id_y: |
16259 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024); |
16260 | case AMDGPU::BI__builtin_amdgcn_workitem_id_z: |
16261 | return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024); |
16262 | |
16263 | |
16264 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_x: |
16265 | return EmitAMDGPUWorkGroupSize(*this, 0); |
16266 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_y: |
16267 | return EmitAMDGPUWorkGroupSize(*this, 1); |
16268 | case AMDGPU::BI__builtin_amdgcn_workgroup_size_z: |
16269 | return EmitAMDGPUWorkGroupSize(*this, 2); |
16270 | |
16271 | |
16272 | case AMDGPU::BI__builtin_amdgcn_grid_size_x: |
16273 | return EmitAMDGPUGridSize(*this, 0); |
16274 | case AMDGPU::BI__builtin_amdgcn_grid_size_y: |
16275 | return EmitAMDGPUGridSize(*this, 1); |
16276 | case AMDGPU::BI__builtin_amdgcn_grid_size_z: |
16277 | return EmitAMDGPUGridSize(*this, 2); |
16278 | |
16279 | |
16280 | case AMDGPU::BI__builtin_r600_recipsqrt_ieee: |
16281 | case AMDGPU::BI__builtin_r600_recipsqrt_ieeef: |
16282 | return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee); |
16283 | case AMDGPU::BI__builtin_r600_read_tidig_x: |
16284 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024); |
16285 | case AMDGPU::BI__builtin_r600_read_tidig_y: |
16286 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024); |
16287 | case AMDGPU::BI__builtin_r600_read_tidig_z: |
16288 | return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024); |
16289 | case AMDGPU::BI__builtin_amdgcn_alignbit: { |
16290 | llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); |
16291 | llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); |
16292 | llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); |
16293 | Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType()); |
16294 | return Builder.CreateCall(F, { Src0, Src1, Src2 }); |
16295 | } |
16296 | |
16297 | case AMDGPU::BI__builtin_amdgcn_fence: { |
16298 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)), |
16299 | EmitScalarExpr(E->getArg(1)), AO, SSID)) |
16300 | return Builder.CreateFence(AO, SSID); |
16301 | LLVM_FALLTHROUGH; |
16302 | } |
16303 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16304 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16305 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16306 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: { |
16307 | unsigned BuiltinAtomicOp; |
16308 | llvm::Type *ResultType = ConvertType(E->getType()); |
16309 | |
16310 | switch (BuiltinID) { |
16311 | case AMDGPU::BI__builtin_amdgcn_atomic_inc32: |
16312 | case AMDGPU::BI__builtin_amdgcn_atomic_inc64: |
16313 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc; |
16314 | break; |
16315 | case AMDGPU::BI__builtin_amdgcn_atomic_dec32: |
16316 | case AMDGPU::BI__builtin_amdgcn_atomic_dec64: |
16317 | BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec; |
16318 | break; |
16319 | } |
16320 | |
16321 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
16322 | Value *Val = EmitScalarExpr(E->getArg(1)); |
16323 | |
16324 | llvm::Function *F = |
16325 | CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()}); |
16326 | |
16327 | if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)), |
16328 | EmitScalarExpr(E->getArg(3)), AO, SSID)) { |
16329 | |
16330 | |
16331 | |
16332 | Value *MemOrder = Builder.getInt32(static_cast<int>(AO)); |
16333 | Value *MemScope = Builder.getInt32(static_cast<int>(SSID)); |
16334 | |
16335 | QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); |
16336 | bool Volatile = |
16337 | PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); |
16338 | Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile)); |
16339 | |
16340 | return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile}); |
16341 | } |
16342 | LLVM_FALLTHROUGH; |
16343 | } |
16344 | default: |
16345 | return nullptr; |
16346 | } |
16347 | } |
16348 | |
16349 | |
16350 | |
16351 | |
16352 | static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, |
16353 | unsigned IntrinsicID, |
16354 | const CallExpr *E) { |
16355 | unsigned NumArgs = E->getNumArgs() - 1; |
16356 | SmallVector<Value *, 8> Args(NumArgs); |
16357 | for (unsigned I = 0; I < NumArgs; ++I) |
16358 | Args[I] = CGF.EmitScalarExpr(E->getArg(I)); |
16359 | Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); |
16360 | Function *F = CGF.CGM.getIntrinsic(IntrinsicID); |
16361 | Value *Call = CGF.Builder.CreateCall(F, Args); |
16362 | Value *CC = CGF.Builder.CreateExtractValue(Call, 1); |
16363 | CGF.Builder.CreateStore(CC, CCPtr); |
16364 | return CGF.Builder.CreateExtractValue(Call, 0); |
16365 | } |
16366 | |
16367 | Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, |
16368 | const CallExpr *E) { |
16369 | switch (BuiltinID) { |
16370 | case SystemZ::BI__builtin_tbegin: { |
16371 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16372 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16373 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); |
16374 | return Builder.CreateCall(F, {TDB, Control}); |
16375 | } |
16376 | case SystemZ::BI__builtin_tbegin_nofloat: { |
16377 | Value *TDB = EmitScalarExpr(E->getArg(0)); |
16378 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); |
16379 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); |
16380 | return Builder.CreateCall(F, {TDB, Control}); |
16381 | } |
16382 | case SystemZ::BI__builtin_tbeginc: { |
16383 | Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); |
16384 | Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); |
16385 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); |
16386 | return Builder.CreateCall(F, {TDB, Control}); |
16387 | } |
16388 | case SystemZ::BI__builtin_tabort: { |
16389 | Value *Data = EmitScalarExpr(E->getArg(0)); |
16390 | Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort); |
16391 | return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); |
16392 | } |
16393 | case SystemZ::BI__builtin_non_tx_store: { |
16394 | Value *Address = EmitScalarExpr(E->getArg(0)); |
16395 | Value *Data = EmitScalarExpr(E->getArg(1)); |
16396 | Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); |
16397 | return Builder.CreateCall(F, {Data, Address}); |
16398 | } |
16399 | |
16400 | |
16401 | |
16402 | |
16403 | |
16404 | |
16405 | case SystemZ::BI__builtin_s390_vpopctb: |
16406 | case SystemZ::BI__builtin_s390_vpopcth: |
16407 | case SystemZ::BI__builtin_s390_vpopctf: |
16408 | case SystemZ::BI__builtin_s390_vpopctg: { |
16409 | llvm::Type *ResultType = ConvertType(E->getType()); |
16410 | Value *X = EmitScalarExpr(E->getArg(0)); |
16411 | Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); |
16412 | return Builder.CreateCall(F, X); |
16413 | } |
16414 | |
16415 | case SystemZ::BI__builtin_s390_vclzb: |
16416 | case SystemZ::BI__builtin_s390_vclzh: |
16417 | case SystemZ::BI__builtin_s390_vclzf: |
16418 | case SystemZ::BI__builtin_s390_vclzg: { |
16419 | llvm::Type *ResultType = ConvertType(E->getType()); |
16420 | Value *X = EmitScalarExpr(E->getArg(0)); |
16421 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
16422 | Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); |
16423 | return Builder.CreateCall(F, {X, Undef}); |
16424 | } |
16425 | |
16426 | case SystemZ::BI__builtin_s390_vctzb: |
16427 | case SystemZ::BI__builtin_s390_vctzh: |
16428 | case SystemZ::BI__builtin_s390_vctzf: |
16429 | case SystemZ::BI__builtin_s390_vctzg: { |
16430 | llvm::Type *ResultType = ConvertType(E->getType()); |
16431 | Value *X = EmitScalarExpr(E->getArg(0)); |
16432 | Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); |
16433 | Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); |
16434 | return Builder.CreateCall(F, {X, Undef}); |
16435 | } |
16436 | |
16437 | case SystemZ::BI__builtin_s390_vfsqsb: |
16438 | case SystemZ::BI__builtin_s390_vfsqdb: { |
16439 | llvm::Type *ResultType = ConvertType(E->getType()); |
16440 | Value *X = EmitScalarExpr(E->getArg(0)); |
16441 | if (Builder.getIsFPConstrained()) { |
16442 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType); |
16443 | return Builder.CreateConstrainedFPCall(F, { X }); |
16444 | } else { |
16445 | Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); |
16446 | return Builder.CreateCall(F, X); |
16447 | } |
16448 | } |
16449 | case SystemZ::BI__builtin_s390_vfmasb: |
16450 | case SystemZ::BI__builtin_s390_vfmadb: { |
16451 | llvm::Type *ResultType = ConvertType(E->getType()); |
16452 | Value *X = EmitScalarExpr(E->getArg(0)); |
16453 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16454 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16455 | if (Builder.getIsFPConstrained()) { |
16456 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16457 | return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); |
16458 | } else { |
16459 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16460 | return Builder.CreateCall(F, {X, Y, Z}); |
16461 | } |
16462 | } |
16463 | case SystemZ::BI__builtin_s390_vfmssb: |
16464 | case SystemZ::BI__builtin_s390_vfmsdb: { |
16465 | llvm::Type *ResultType = ConvertType(E->getType()); |
16466 | Value *X = EmitScalarExpr(E->getArg(0)); |
16467 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16468 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16469 | if (Builder.getIsFPConstrained()) { |
16470 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16471 | return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
16472 | } else { |
16473 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16474 | return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); |
16475 | } |
16476 | } |
16477 | case SystemZ::BI__builtin_s390_vfnmasb: |
16478 | case SystemZ::BI__builtin_s390_vfnmadb: { |
16479 | llvm::Type *ResultType = ConvertType(E->getType()); |
16480 | Value *X = EmitScalarExpr(E->getArg(0)); |
16481 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16482 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16483 | if (Builder.getIsFPConstrained()) { |
16484 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16485 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); |
16486 | } else { |
16487 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16488 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); |
16489 | } |
16490 | } |
16491 | case SystemZ::BI__builtin_s390_vfnmssb: |
16492 | case SystemZ::BI__builtin_s390_vfnmsdb: { |
16493 | llvm::Type *ResultType = ConvertType(E->getType()); |
16494 | Value *X = EmitScalarExpr(E->getArg(0)); |
16495 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16496 | Value *Z = EmitScalarExpr(E->getArg(2)); |
16497 | if (Builder.getIsFPConstrained()) { |
16498 | Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); |
16499 | Value *NegZ = Builder.CreateFNeg(Z, "sub"); |
16500 | return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ})); |
16501 | } else { |
16502 | Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); |
16503 | Value *NegZ = Builder.CreateFNeg(Z, "neg"); |
16504 | return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ})); |
16505 | } |
16506 | } |
16507 | case SystemZ::BI__builtin_s390_vflpsb: |
16508 | case SystemZ::BI__builtin_s390_vflpdb: { |
16509 | llvm::Type *ResultType = ConvertType(E->getType()); |
16510 | Value *X = EmitScalarExpr(E->getArg(0)); |
16511 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
16512 | return Builder.CreateCall(F, X); |
16513 | } |
16514 | case SystemZ::BI__builtin_s390_vflnsb: |
16515 | case SystemZ::BI__builtin_s390_vflndb: { |
16516 | llvm::Type *ResultType = ConvertType(E->getType()); |
16517 | Value *X = EmitScalarExpr(E->getArg(0)); |
16518 | Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); |
16519 | return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg"); |
16520 | } |
16521 | case SystemZ::BI__builtin_s390_vfisb: |
16522 | case SystemZ::BI__builtin_s390_vfidb: { |
16523 | llvm::Type *ResultType = ConvertType(E->getType()); |
16524 | Value *X = EmitScalarExpr(E->getArg(0)); |
16525 | |
16526 | llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext()); |
16527 | llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16528 | |
16529 | |
16530 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16531 | Intrinsic::ID CI; |
16532 | switch (M4.getZExtValue()) { |
16533 | default: break; |
16534 | case 0: |
16535 | switch (M5.getZExtValue()) { |
16536 | default: break; |
16537 | case 0: ID = Intrinsic::rint; |
16538 | CI = Intrinsic::experimental_constrained_rint; break; |
16539 | } |
16540 | break; |
16541 | case 4: |
16542 | switch (M5.getZExtValue()) { |
16543 | default: break; |
16544 | case 0: ID = Intrinsic::nearbyint; |
16545 | CI = Intrinsic::experimental_constrained_nearbyint; break; |
16546 | case 1: ID = Intrinsic::round; |
16547 | CI = Intrinsic::experimental_constrained_round; break; |
16548 | case 5: ID = Intrinsic::trunc; |
16549 | CI = Intrinsic::experimental_constrained_trunc; break; |
16550 | case 6: ID = Intrinsic::ceil; |
16551 | CI = Intrinsic::experimental_constrained_ceil; break; |
16552 | case 7: ID = Intrinsic::floor; |
16553 | CI = Intrinsic::experimental_constrained_floor; break; |
16554 | } |
16555 | break; |
16556 | } |
16557 | if (ID != Intrinsic::not_intrinsic) { |
16558 | if (Builder.getIsFPConstrained()) { |
16559 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16560 | return Builder.CreateConstrainedFPCall(F, X); |
16561 | } else { |
16562 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16563 | return Builder.CreateCall(F, X); |
16564 | } |
16565 | } |
16566 | switch (BuiltinID) { |
16567 | case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; |
16568 | case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; |
16569 | default: llvm_unreachable("Unknown BuiltinID"); |
16570 | } |
16571 | Function *F = CGM.getIntrinsic(ID); |
16572 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16573 | Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); |
16574 | return Builder.CreateCall(F, {X, M4Value, M5Value}); |
16575 | } |
16576 | case SystemZ::BI__builtin_s390_vfmaxsb: |
16577 | case SystemZ::BI__builtin_s390_vfmaxdb: { |
16578 | llvm::Type *ResultType = ConvertType(E->getType()); |
16579 | Value *X = EmitScalarExpr(E->getArg(0)); |
16580 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16581 | |
16582 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16583 | |
16584 | |
16585 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16586 | Intrinsic::ID CI; |
16587 | switch (M4.getZExtValue()) { |
16588 | default: break; |
16589 | case 4: ID = Intrinsic::maxnum; |
16590 | CI = Intrinsic::experimental_constrained_maxnum; break; |
16591 | } |
16592 | if (ID != Intrinsic::not_intrinsic) { |
16593 | if (Builder.getIsFPConstrained()) { |
16594 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16595 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
16596 | } else { |
16597 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16598 | return Builder.CreateCall(F, {X, Y}); |
16599 | } |
16600 | } |
16601 | switch (BuiltinID) { |
16602 | case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; |
16603 | case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; |
16604 | default: llvm_unreachable("Unknown BuiltinID"); |
16605 | } |
16606 | Function *F = CGM.getIntrinsic(ID); |
16607 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16608 | return Builder.CreateCall(F, {X, Y, M4Value}); |
16609 | } |
16610 | case SystemZ::BI__builtin_s390_vfminsb: |
16611 | case SystemZ::BI__builtin_s390_vfmindb: { |
16612 | llvm::Type *ResultType = ConvertType(E->getType()); |
16613 | Value *X = EmitScalarExpr(E->getArg(0)); |
16614 | Value *Y = EmitScalarExpr(E->getArg(1)); |
16615 | |
16616 | llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); |
16617 | |
16618 | |
16619 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
16620 | Intrinsic::ID CI; |
16621 | switch (M4.getZExtValue()) { |
16622 | default: break; |
16623 | case 4: ID = Intrinsic::minnum; |
16624 | CI = Intrinsic::experimental_constrained_minnum; break; |
16625 | } |
16626 | if (ID != Intrinsic::not_intrinsic) { |
16627 | if (Builder.getIsFPConstrained()) { |
16628 | Function *F = CGM.getIntrinsic(CI, ResultType); |
16629 | return Builder.CreateConstrainedFPCall(F, {X, Y}); |
16630 | } else { |
16631 | Function *F = CGM.getIntrinsic(ID, ResultType); |
16632 | return Builder.CreateCall(F, {X, Y}); |
16633 | } |
16634 | } |
16635 | switch (BuiltinID) { |
16636 | case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; |
16637 | case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; |
16638 | default: llvm_unreachable("Unknown BuiltinID"); |
16639 | } |
16640 | Function *F = CGM.getIntrinsic(ID); |
16641 | Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); |
16642 | return Builder.CreateCall(F, {X, Y, M4Value}); |
16643 | } |
16644 | |
16645 | case SystemZ::BI__builtin_s390_vlbrh: |
16646 | case SystemZ::BI__builtin_s390_vlbrf: |
16647 | case SystemZ::BI__builtin_s390_vlbrg: { |
16648 | llvm::Type *ResultType = ConvertType(E->getType()); |
16649 | Value *X = EmitScalarExpr(E->getArg(0)); |
16650 | Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType); |
16651 | return Builder.CreateCall(F, X); |
16652 | } |
16653 | |
16654 | |
16655 | |
16656 | #define INTRINSIC_WITH_CC(NAME) \ |
16657 | case SystemZ::BI__builtin_##NAME: \ |
16658 | return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) |
16659 | |
16660 | INTRINSIC_WITH_CC(s390_vpkshs); |
16661 | INTRINSIC_WITH_CC(s390_vpksfs); |
16662 | INTRINSIC_WITH_CC(s390_vpksgs); |
16663 | |
16664 | INTRINSIC_WITH_CC(s390_vpklshs); |
16665 | INTRINSIC_WITH_CC(s390_vpklsfs); |
16666 | INTRINSIC_WITH_CC(s390_vpklsgs); |
16667 | |
16668 | INTRINSIC_WITH_CC(s390_vceqbs); |
16669 | INTRINSIC_WITH_CC(s390_vceqhs); |
16670 | INTRINSIC_WITH_CC(s390_vceqfs); |
16671 | INTRINSIC_WITH_CC(s390_vceqgs); |
16672 | |
16673 | INTRINSIC_WITH_CC(s390_vchbs); |
16674 | INTRINSIC_WITH_CC(s390_vchhs); |
16675 | INTRINSIC_WITH_CC(s390_vchfs); |
16676 | INTRINSIC_WITH_CC(s390_vchgs); |
16677 | |
16678 | INTRINSIC_WITH_CC(s390_vchlbs); |
16679 | INTRINSIC_WITH_CC(s390_vchlhs); |
16680 | INTRINSIC_WITH_CC(s390_vchlfs); |
16681 | INTRINSIC_WITH_CC(s390_vchlgs); |
16682 | |
16683 | INTRINSIC_WITH_CC(s390_vfaebs); |
16684 | INTRINSIC_WITH_CC(s390_vfaehs); |
16685 | INTRINSIC_WITH_CC(s390_vfaefs); |
16686 | |
16687 | INTRINSIC_WITH_CC(s390_vfaezbs); |
16688 | INTRINSIC_WITH_CC(s390_vfaezhs); |
16689 | INTRINSIC_WITH_CC(s390_vfaezfs); |
16690 | |
16691 | INTRINSIC_WITH_CC(s390_vfeebs); |
16692 | INTRINSIC_WITH_CC(s390_vfeehs); |
16693 | INTRINSIC_WITH_CC(s390_vfeefs); |
16694 | |
16695 | INTRINSIC_WITH_CC(s390_vfeezbs); |
16696 | INTRINSIC_WITH_CC(s390_vfeezhs); |
16697 | INTRINSIC_WITH_CC(s390_vfeezfs); |
16698 | |
16699 | INTRINSIC_WITH_CC(s390_vfenebs); |
16700 | INTRINSIC_WITH_CC(s390_vfenehs); |
16701 | INTRINSIC_WITH_CC(s390_vfenefs); |
16702 | |
16703 | INTRINSIC_WITH_CC(s390_vfenezbs); |
16704 | INTRINSIC_WITH_CC(s390_vfenezhs); |
16705 | INTRINSIC_WITH_CC(s390_vfenezfs); |
16706 | |
16707 | INTRINSIC_WITH_CC(s390_vistrbs); |
16708 | INTRINSIC_WITH_CC(s390_vistrhs); |
16709 | INTRINSIC_WITH_CC(s390_vistrfs); |
16710 | |
16711 | INTRINSIC_WITH_CC(s390_vstrcbs); |
16712 | INTRINSIC_WITH_CC(s390_vstrchs); |
16713 | INTRINSIC_WITH_CC(s390_vstrcfs); |
16714 | |
16715 | INTRINSIC_WITH_CC(s390_vstrczbs); |
16716 | INTRINSIC_WITH_CC(s390_vstrczhs); |
16717 | INTRINSIC_WITH_CC(s390_vstrczfs); |
16718 | |
16719 | INTRINSIC_WITH_CC(s390_vfcesbs); |
16720 | INTRINSIC_WITH_CC(s390_vfcedbs); |
16721 | INTRINSIC_WITH_CC(s390_vfchsbs); |
16722 | INTRINSIC_WITH_CC(s390_vfchdbs); |
16723 | INTRINSIC_WITH_CC(s390_vfchesbs); |
16724 | INTRINSIC_WITH_CC(s390_vfchedbs); |
16725 | |
16726 | INTRINSIC_WITH_CC(s390_vftcisb); |
16727 | INTRINSIC_WITH_CC(s390_vftcidb); |
16728 | |
16729 | INTRINSIC_WITH_CC(s390_vstrsb); |
16730 | INTRINSIC_WITH_CC(s390_vstrsh); |
16731 | INTRINSIC_WITH_CC(s390_vstrsf); |
16732 | |
16733 | INTRINSIC_WITH_CC(s390_vstrszb); |
16734 | INTRINSIC_WITH_CC(s390_vstrszh); |
16735 | INTRINSIC_WITH_CC(s390_vstrszf); |
16736 | |
16737 | #undef INTRINSIC_WITH_CC |
16738 | |
16739 | default: |
16740 | return nullptr; |
16741 | } |
16742 | } |
16743 | |
16744 | namespace { |
16745 | |
16746 | struct NVPTXMmaLdstInfo { |
16747 | unsigned NumResults; |
16748 | |
16749 | unsigned IID_col; |
16750 | unsigned IID_row; |
16751 | }; |
16752 | |
16753 | #define MMA_INTR(geom_op_type, layout) \ |
16754 | Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride |
16755 | #define MMA_LDST(n, geom_op_type) \ |
16756 | { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } |
16757 | |
16758 | static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { |
16759 | switch (BuiltinID) { |
16760 | |
16761 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
16762 | return MMA_LDST(8, m16n16k16_load_a_f16); |
16763 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
16764 | return MMA_LDST(8, m16n16k16_load_b_f16); |
16765 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
16766 | return MMA_LDST(4, m16n16k16_load_c_f16); |
16767 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
16768 | return MMA_LDST(8, m16n16k16_load_c_f32); |
16769 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
16770 | return MMA_LDST(8, m32n8k16_load_a_f16); |
16771 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
16772 | return MMA_LDST(8, m32n8k16_load_b_f16); |
16773 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
16774 | return MMA_LDST(4, m32n8k16_load_c_f16); |
16775 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
16776 | return MMA_LDST(8, m32n8k16_load_c_f32); |
16777 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
16778 | return MMA_LDST(8, m8n32k16_load_a_f16); |
16779 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
16780 | return MMA_LDST(8, m8n32k16_load_b_f16); |
16781 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
16782 | return MMA_LDST(4, m8n32k16_load_c_f16); |
16783 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
16784 | return MMA_LDST(8, m8n32k16_load_c_f32); |
16785 | |
16786 | |
16787 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
16788 | return MMA_LDST(2, m16n16k16_load_a_s8); |
16789 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
16790 | return MMA_LDST(2, m16n16k16_load_a_u8); |
16791 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
16792 | return MMA_LDST(2, m16n16k16_load_b_s8); |
16793 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
16794 | return MMA_LDST(2, m16n16k16_load_b_u8); |
16795 | case NVPTX::BI__imma_m16n16k16_ld_c: |
16796 | return MMA_LDST(8, m16n16k16_load_c_s32); |
16797 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
16798 | return MMA_LDST(4, m32n8k16_load_a_s8); |
16799 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
16800 | return MMA_LDST(4, m32n8k16_load_a_u8); |
16801 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
16802 | return MMA_LDST(1, m32n8k16_load_b_s8); |
16803 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
16804 | return MMA_LDST(1, m32n8k16_load_b_u8); |
16805 | case NVPTX::BI__imma_m32n8k16_ld_c: |
16806 | return MMA_LDST(8, m32n8k16_load_c_s32); |
16807 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
16808 | return MMA_LDST(1, m8n32k16_load_a_s8); |
16809 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
16810 | return MMA_LDST(1, m8n32k16_load_a_u8); |
16811 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
16812 | return MMA_LDST(4, m8n32k16_load_b_s8); |
16813 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
16814 | return MMA_LDST(4, m8n32k16_load_b_u8); |
16815 | case NVPTX::BI__imma_m8n32k16_ld_c: |
16816 | return MMA_LDST(8, m8n32k16_load_c_s32); |
16817 | |
16818 | |
16819 | |
16820 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
16821 | return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)}; |
16822 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
16823 | return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)}; |
16824 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
16825 | return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0}; |
16826 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
16827 | return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0}; |
16828 | case NVPTX::BI__imma_m8n8k32_ld_c: |
16829 | return MMA_LDST(2, m8n8k32_load_c_s32); |
16830 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
16831 | return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)}; |
16832 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
16833 | return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0}; |
16834 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
16835 | return MMA_LDST(2, m8n8k128_load_c_s32); |
16836 | |
16837 | |
16838 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
16839 | return MMA_LDST(1, m8n8k4_load_a_f64); |
16840 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
16841 | return MMA_LDST(1, m8n8k4_load_b_f64); |
16842 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
16843 | return MMA_LDST(2, m8n8k4_load_c_f64); |
16844 | |
16845 | |
16846 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
16847 | return MMA_LDST(4, m16n16k16_load_a_bf16); |
16848 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
16849 | return MMA_LDST(4, m16n16k16_load_b_bf16); |
16850 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
16851 | return MMA_LDST(2, m8n32k16_load_a_bf16); |
16852 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
16853 | return MMA_LDST(8, m8n32k16_load_b_bf16); |
16854 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
16855 | return MMA_LDST(8, m32n8k16_load_a_bf16); |
16856 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
16857 | return MMA_LDST(2, m32n8k16_load_b_bf16); |
16858 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
16859 | return MMA_LDST(4, m16n16k8_load_a_tf32); |
16860 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
16861 | return MMA_LDST(2, m16n16k8_load_b_tf32); |
16862 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: |
16863 | return MMA_LDST(8, m16n16k8_load_c_f32); |
16864 | |
16865 | |
16866 | |
16867 | |
16868 | |
16869 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
16870 | return MMA_LDST(4, m16n16k16_store_d_f16); |
16871 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
16872 | return MMA_LDST(8, m16n16k16_store_d_f32); |
16873 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
16874 | return MMA_LDST(4, m32n8k16_store_d_f16); |
16875 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
16876 | return MMA_LDST(8, m32n8k16_store_d_f32); |
16877 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
16878 | return MMA_LDST(4, m8n32k16_store_d_f16); |
16879 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
16880 | return MMA_LDST(8, m8n32k16_store_d_f32); |
16881 | |
16882 | |
16883 | |
16884 | |
16885 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
16886 | return MMA_LDST(8, m16n16k16_store_d_s32); |
16887 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
16888 | return MMA_LDST(8, m32n8k16_store_d_s32); |
16889 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
16890 | return MMA_LDST(8, m8n32k16_store_d_s32); |
16891 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
16892 | return MMA_LDST(2, m8n8k32_store_d_s32); |
16893 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
16894 | return MMA_LDST(2, m8n8k128_store_d_s32); |
16895 | |
16896 | |
16897 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
16898 | return MMA_LDST(2, m8n8k4_store_d_f64); |
16899 | |
16900 | |
16901 | case NVPTX::BI__mma_m16n16k8_st_c_f32: |
16902 | return MMA_LDST(8, m16n16k8_store_d_f32); |
16903 | |
16904 | default: |
16905 | llvm_unreachable("Unknown MMA builtin"); |
16906 | } |
16907 | } |
16908 | #undef MMA_LDST |
16909 | #undef MMA_INTR |
16910 | |
16911 | |
16912 | struct NVPTXMmaInfo { |
16913 | unsigned NumEltsA; |
16914 | unsigned NumEltsB; |
16915 | unsigned NumEltsC; |
16916 | unsigned NumEltsD; |
16917 | |
16918 | |
16919 | |
16920 | |
16921 | std::array<unsigned, 8> Variants; |
16922 | |
16923 | unsigned getMMAIntrinsic(int Layout, bool Satf) { |
16924 | unsigned Index = Layout + 4 * Satf; |
16925 | if (Index >= Variants.size()) |
16926 | return 0; |
16927 | return Variants[Index]; |
16928 | } |
16929 | }; |
16930 | |
16931 | |
16932 | |
16933 | static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { |
16934 | |
16935 | #define MMA_VARIANTS(geom, type) \ |
16936 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \ |
16937 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
16938 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \ |
16939 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type |
16940 | #define MMA_SATF_VARIANTS(geom, type) \ |
16941 | MMA_VARIANTS(geom, type), \ |
16942 | Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \ |
16943 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
16944 | Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \ |
16945 | Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite |
16946 | |
16947 | #define MMA_VARIANTS_I4(geom, type) \ |
16948 | 0, \ |
16949 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \ |
16950 | 0, \ |
16951 | 0, \ |
16952 | 0, \ |
16953 | Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \ |
16954 | 0, \ |
16955 | 0 |
16956 | |
16957 | #define MMA_VARIANTS_B1_XOR(geom, type) \ |
16958 | 0, \ |
16959 | Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \ |
16960 | 0, \ |
16961 | 0, \ |
16962 | 0, \ |
16963 | 0, \ |
16964 | 0, \ |
16965 | 0 |
16966 | #define MMA_VARIANTS_B1_AND(geom, type) \ |
16967 | 0, \ |
16968 | Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \ |
16969 | 0, \ |
16970 | 0, \ |
16971 | 0, \ |
16972 | 0, \ |
16973 | 0, \ |
16974 | 0 |
16975 | |
16976 | switch (BuiltinID) { |
16977 | |
16978 | |
16979 | |
16980 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
16981 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}}; |
16982 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
16983 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}}; |
16984 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
16985 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}}; |
16986 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
16987 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}}; |
16988 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
16989 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}}; |
16990 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
16991 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}}; |
16992 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
16993 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}}; |
16994 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
16995 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}}; |
16996 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
16997 | return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}}; |
16998 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
16999 | return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}}; |
17000 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
17001 | return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}}; |
17002 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
17003 | return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}}; |
17004 | |
17005 | |
17006 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
17007 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}}; |
17008 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
17009 | return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}}; |
17010 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
17011 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}}; |
17012 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
17013 | return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}}; |
17014 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
17015 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}}; |
17016 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
17017 | return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}}; |
17018 | |
17019 | |
17020 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
17021 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}}; |
17022 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
17023 | return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}}; |
17024 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
17025 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}}; |
17026 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
17027 | return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}}; |
17028 | |
17029 | |
17030 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
17031 | return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}}; |
17032 | |
17033 | |
17034 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
17035 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}}; |
17036 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
17037 | return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}}; |
17038 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
17039 | return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}}; |
17040 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: |
17041 | return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}}; |
17042 | default: |
17043 | llvm_unreachable("Unexpected builtin ID."); |
17044 | } |
17045 | #undef MMA_VARIANTS |
17046 | #undef MMA_SATF_VARIANTS |
17047 | #undef MMA_VARIANTS_I4 |
17048 | #undef MMA_VARIANTS_B1_AND |
17049 | #undef MMA_VARIANTS_B1_XOR |
17050 | } |
17051 | |
17052 | } |
17053 | |
17054 | Value * |
17055 | CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { |
17056 | auto MakeLdg = [&](unsigned IntrinsicID) { |
17057 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17058 | clang::CharUnits Align = |
17059 | CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType()); |
17060 | return Builder.CreateCall( |
17061 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
17062 | Ptr->getType()}), |
17063 | {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())}); |
17064 | }; |
17065 | auto MakeScopedAtomic = [&](unsigned IntrinsicID) { |
17066 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17067 | return Builder.CreateCall( |
17068 | CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(), |
17069 | Ptr->getType()}), |
17070 | {Ptr, EmitScalarExpr(E->getArg(1))}); |
17071 | }; |
17072 | switch (BuiltinID) { |
17073 | case NVPTX::BI__nvvm_atom_add_gen_i: |
17074 | case NVPTX::BI__nvvm_atom_add_gen_l: |
17075 | case NVPTX::BI__nvvm_atom_add_gen_ll: |
17076 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E); |
17077 | |
17078 | case NVPTX::BI__nvvm_atom_sub_gen_i: |
17079 | case NVPTX::BI__nvvm_atom_sub_gen_l: |
17080 | case NVPTX::BI__nvvm_atom_sub_gen_ll: |
17081 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E); |
17082 | |
17083 | case NVPTX::BI__nvvm_atom_and_gen_i: |
17084 | case NVPTX::BI__nvvm_atom_and_gen_l: |
17085 | case NVPTX::BI__nvvm_atom_and_gen_ll: |
17086 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E); |
17087 | |
17088 | case NVPTX::BI__nvvm_atom_or_gen_i: |
17089 | case NVPTX::BI__nvvm_atom_or_gen_l: |
17090 | case NVPTX::BI__nvvm_atom_or_gen_ll: |
17091 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E); |
17092 | |
17093 | case NVPTX::BI__nvvm_atom_xor_gen_i: |
17094 | case NVPTX::BI__nvvm_atom_xor_gen_l: |
17095 | case NVPTX::BI__nvvm_atom_xor_gen_ll: |
17096 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E); |
17097 | |
17098 | case NVPTX::BI__nvvm_atom_xchg_gen_i: |
17099 | case NVPTX::BI__nvvm_atom_xchg_gen_l: |
17100 | case NVPTX::BI__nvvm_atom_xchg_gen_ll: |
17101 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E); |
17102 | |
17103 | case NVPTX::BI__nvvm_atom_max_gen_i: |
17104 | case NVPTX::BI__nvvm_atom_max_gen_l: |
17105 | case NVPTX::BI__nvvm_atom_max_gen_ll: |
17106 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E); |
17107 | |
17108 | case NVPTX::BI__nvvm_atom_max_gen_ui: |
17109 | case NVPTX::BI__nvvm_atom_max_gen_ul: |
17110 | case NVPTX::BI__nvvm_atom_max_gen_ull: |
17111 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E); |
17112 | |
17113 | case NVPTX::BI__nvvm_atom_min_gen_i: |
17114 | case NVPTX::BI__nvvm_atom_min_gen_l: |
17115 | case NVPTX::BI__nvvm_atom_min_gen_ll: |
17116 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E); |
17117 | |
17118 | case NVPTX::BI__nvvm_atom_min_gen_ui: |
17119 | case NVPTX::BI__nvvm_atom_min_gen_ul: |
17120 | case NVPTX::BI__nvvm_atom_min_gen_ull: |
17121 | return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E); |
17122 | |
17123 | case NVPTX::BI__nvvm_atom_cas_gen_i: |
17124 | case NVPTX::BI__nvvm_atom_cas_gen_l: |
17125 | case NVPTX::BI__nvvm_atom_cas_gen_ll: |
17126 | |
17127 | |
17128 | return MakeAtomicCmpXchgValue(*this, E, false); |
17129 | |
17130 | case NVPTX::BI__nvvm_atom_add_gen_f: |
17131 | case NVPTX::BI__nvvm_atom_add_gen_d: { |
17132 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17133 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17134 | return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val, |
17135 | AtomicOrdering::SequentiallyConsistent); |
17136 | } |
17137 | |
17138 | case NVPTX::BI__nvvm_atom_inc_gen_ui: { |
17139 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17140 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17141 | Function *FnALI32 = |
17142 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); |
17143 | return Builder.CreateCall(FnALI32, {Ptr, Val}); |
17144 | } |
17145 | |
17146 | case NVPTX::BI__nvvm_atom_dec_gen_ui: { |
17147 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17148 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17149 | Function *FnALD32 = |
17150 | CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); |
17151 | return Builder.CreateCall(FnALD32, {Ptr, Val}); |
17152 | } |
17153 | |
17154 | case NVPTX::BI__nvvm_ldg_c: |
17155 | case NVPTX::BI__nvvm_ldg_c2: |
17156 | case NVPTX::BI__nvvm_ldg_c4: |
17157 | case NVPTX::BI__nvvm_ldg_s: |
17158 | case NVPTX::BI__nvvm_ldg_s2: |
17159 | case NVPTX::BI__nvvm_ldg_s4: |
17160 | case NVPTX::BI__nvvm_ldg_i: |
17161 | case NVPTX::BI__nvvm_ldg_i2: |
17162 | case NVPTX::BI__nvvm_ldg_i4: |
17163 | case NVPTX::BI__nvvm_ldg_l: |
17164 | case NVPTX::BI__nvvm_ldg_ll: |
17165 | case NVPTX::BI__nvvm_ldg_ll2: |
17166 | case NVPTX::BI__nvvm_ldg_uc: |
17167 | case NVPTX::BI__nvvm_ldg_uc2: |
17168 | case NVPTX::BI__nvvm_ldg_uc4: |
17169 | case NVPTX::BI__nvvm_ldg_us: |
17170 | case NVPTX::BI__nvvm_ldg_us2: |
17171 | case NVPTX::BI__nvvm_ldg_us4: |
17172 | case NVPTX::BI__nvvm_ldg_ui: |
17173 | case NVPTX::BI__nvvm_ldg_ui2: |
17174 | case NVPTX::BI__nvvm_ldg_ui4: |
17175 | case NVPTX::BI__nvvm_ldg_ul: |
17176 | case NVPTX::BI__nvvm_ldg_ull: |
17177 | case NVPTX::BI__nvvm_ldg_ull2: |
17178 | |
17179 | |
17180 | |
17181 | return MakeLdg(Intrinsic::nvvm_ldg_global_i); |
17182 | case NVPTX::BI__nvvm_ldg_f: |
17183 | case NVPTX::BI__nvvm_ldg_f2: |
17184 | case NVPTX::BI__nvvm_ldg_f4: |
17185 | case NVPTX::BI__nvvm_ldg_d: |
17186 | case NVPTX::BI__nvvm_ldg_d2: |
17187 | return MakeLdg(Intrinsic::nvvm_ldg_global_f); |
17188 | |
17189 | case NVPTX::BI__nvvm_atom_cta_add_gen_i: |
17190 | case NVPTX::BI__nvvm_atom_cta_add_gen_l: |
17191 | case NVPTX::BI__nvvm_atom_cta_add_gen_ll: |
17192 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta); |
17193 | case NVPTX::BI__nvvm_atom_sys_add_gen_i: |
17194 | case NVPTX::BI__nvvm_atom_sys_add_gen_l: |
17195 | case NVPTX::BI__nvvm_atom_sys_add_gen_ll: |
17196 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys); |
17197 | case NVPTX::BI__nvvm_atom_cta_add_gen_f: |
17198 | case NVPTX::BI__nvvm_atom_cta_add_gen_d: |
17199 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta); |
17200 | case NVPTX::BI__nvvm_atom_sys_add_gen_f: |
17201 | case NVPTX::BI__nvvm_atom_sys_add_gen_d: |
17202 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys); |
17203 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_i: |
17204 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_l: |
17205 | case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll: |
17206 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta); |
17207 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_i: |
17208 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_l: |
17209 | case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll: |
17210 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys); |
17211 | case NVPTX::BI__nvvm_atom_cta_max_gen_i: |
17212 | case NVPTX::BI__nvvm_atom_cta_max_gen_ui: |
17213 | case NVPTX::BI__nvvm_atom_cta_max_gen_l: |
17214 | case NVPTX::BI__nvvm_atom_cta_max_gen_ul: |
17215 | case NVPTX::BI__nvvm_atom_cta_max_gen_ll: |
17216 | case NVPTX::BI__nvvm_atom_cta_max_gen_ull: |
17217 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta); |
17218 | case NVPTX::BI__nvvm_atom_sys_max_gen_i: |
17219 | case NVPTX::BI__nvvm_atom_sys_max_gen_ui: |
17220 | case NVPTX::BI__nvvm_atom_sys_max_gen_l: |
17221 | case NVPTX::BI__nvvm_atom_sys_max_gen_ul: |
17222 | case NVPTX::BI__nvvm_atom_sys_max_gen_ll: |
17223 | case NVPTX::BI__nvvm_atom_sys_max_gen_ull: |
17224 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys); |
17225 | case NVPTX::BI__nvvm_atom_cta_min_gen_i: |
17226 | case NVPTX::BI__nvvm_atom_cta_min_gen_ui: |
17227 | case NVPTX::BI__nvvm_atom_cta_min_gen_l: |
17228 | case NVPTX::BI__nvvm_atom_cta_min_gen_ul: |
17229 | case NVPTX::BI__nvvm_atom_cta_min_gen_ll: |
17230 | case NVPTX::BI__nvvm_atom_cta_min_gen_ull: |
17231 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta); |
17232 | case NVPTX::BI__nvvm_atom_sys_min_gen_i: |
17233 | case NVPTX::BI__nvvm_atom_sys_min_gen_ui: |
17234 | case NVPTX::BI__nvvm_atom_sys_min_gen_l: |
17235 | case NVPTX::BI__nvvm_atom_sys_min_gen_ul: |
17236 | case NVPTX::BI__nvvm_atom_sys_min_gen_ll: |
17237 | case NVPTX::BI__nvvm_atom_sys_min_gen_ull: |
17238 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys); |
17239 | case NVPTX::BI__nvvm_atom_cta_inc_gen_ui: |
17240 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta); |
17241 | case NVPTX::BI__nvvm_atom_cta_dec_gen_ui: |
17242 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta); |
17243 | case NVPTX::BI__nvvm_atom_sys_inc_gen_ui: |
17244 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys); |
17245 | case NVPTX::BI__nvvm_atom_sys_dec_gen_ui: |
17246 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys); |
17247 | case NVPTX::BI__nvvm_atom_cta_and_gen_i: |
17248 | case NVPTX::BI__nvvm_atom_cta_and_gen_l: |
17249 | case NVPTX::BI__nvvm_atom_cta_and_gen_ll: |
17250 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta); |
17251 | case NVPTX::BI__nvvm_atom_sys_and_gen_i: |
17252 | case NVPTX::BI__nvvm_atom_sys_and_gen_l: |
17253 | case NVPTX::BI__nvvm_atom_sys_and_gen_ll: |
17254 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys); |
17255 | case NVPTX::BI__nvvm_atom_cta_or_gen_i: |
17256 | case NVPTX::BI__nvvm_atom_cta_or_gen_l: |
17257 | case NVPTX::BI__nvvm_atom_cta_or_gen_ll: |
17258 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta); |
17259 | case NVPTX::BI__nvvm_atom_sys_or_gen_i: |
17260 | case NVPTX::BI__nvvm_atom_sys_or_gen_l: |
17261 | case NVPTX::BI__nvvm_atom_sys_or_gen_ll: |
17262 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys); |
17263 | case NVPTX::BI__nvvm_atom_cta_xor_gen_i: |
17264 | case NVPTX::BI__nvvm_atom_cta_xor_gen_l: |
17265 | case NVPTX::BI__nvvm_atom_cta_xor_gen_ll: |
17266 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta); |
17267 | case NVPTX::BI__nvvm_atom_sys_xor_gen_i: |
17268 | case NVPTX::BI__nvvm_atom_sys_xor_gen_l: |
17269 | case NVPTX::BI__nvvm_atom_sys_xor_gen_ll: |
17270 | return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys); |
17271 | case NVPTX::BI__nvvm_atom_cta_cas_gen_i: |
17272 | case NVPTX::BI__nvvm_atom_cta_cas_gen_l: |
17273 | case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: { |
17274 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17275 | return Builder.CreateCall( |
17276 | CGM.getIntrinsic( |
17277 | Intrinsic::nvvm_atomic_cas_gen_i_cta, |
17278 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
17279 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17280 | } |
17281 | case NVPTX::BI__nvvm_atom_sys_cas_gen_i: |
17282 | case NVPTX::BI__nvvm_atom_sys_cas_gen_l: |
17283 | case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: { |
17284 | Value *Ptr = EmitScalarExpr(E->getArg(0)); |
17285 | return Builder.CreateCall( |
17286 | CGM.getIntrinsic( |
17287 | Intrinsic::nvvm_atomic_cas_gen_i_sys, |
17288 | {Ptr->getType()->getPointerElementType(), Ptr->getType()}), |
17289 | {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); |
17290 | } |
17291 | case NVPTX::BI__nvvm_match_all_sync_i32p: |
17292 | case NVPTX::BI__nvvm_match_all_sync_i64p: { |
17293 | Value *Mask = EmitScalarExpr(E->getArg(0)); |
17294 | Value *Val = EmitScalarExpr(E->getArg(1)); |
17295 | Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); |
17296 | Value *ResultPair = Builder.CreateCall( |
17297 | CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p |
17298 | ? Intrinsic::nvvm_match_all_sync_i32p |
17299 | : Intrinsic::nvvm_match_all_sync_i64p), |
17300 | {Mask, Val}); |
17301 | Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1), |
17302 | PredOutPtr.getElementType()); |
17303 | Builder.CreateStore(Pred, PredOutPtr); |
17304 | return Builder.CreateExtractValue(ResultPair, 0); |
17305 | } |
17306 | |
17307 | |
17308 | case NVPTX::BI__hmma_m16n16k16_ld_a: |
17309 | case NVPTX::BI__hmma_m16n16k16_ld_b: |
17310 | case NVPTX::BI__hmma_m16n16k16_ld_c_f16: |
17311 | case NVPTX::BI__hmma_m16n16k16_ld_c_f32: |
17312 | case NVPTX::BI__hmma_m32n8k16_ld_a: |
17313 | case NVPTX::BI__hmma_m32n8k16_ld_b: |
17314 | case NVPTX::BI__hmma_m32n8k16_ld_c_f16: |
17315 | case NVPTX::BI__hmma_m32n8k16_ld_c_f32: |
17316 | case NVPTX::BI__hmma_m8n32k16_ld_a: |
17317 | case NVPTX::BI__hmma_m8n32k16_ld_b: |
17318 | case NVPTX::BI__hmma_m8n32k16_ld_c_f16: |
17319 | case NVPTX::BI__hmma_m8n32k16_ld_c_f32: |
17320 | |
17321 | case NVPTX::BI__imma_m16n16k16_ld_a_s8: |
17322 | case NVPTX::BI__imma_m16n16k16_ld_a_u8: |
17323 | case NVPTX::BI__imma_m16n16k16_ld_b_s8: |
17324 | case NVPTX::BI__imma_m16n16k16_ld_b_u8: |
17325 | case NVPTX::BI__imma_m16n16k16_ld_c: |
17326 | case NVPTX::BI__imma_m32n8k16_ld_a_s8: |
17327 | case NVPTX::BI__imma_m32n8k16_ld_a_u8: |
17328 | case NVPTX::BI__imma_m32n8k16_ld_b_s8: |
17329 | case NVPTX::BI__imma_m32n8k16_ld_b_u8: |
17330 | case NVPTX::BI__imma_m32n8k16_ld_c: |
17331 | case NVPTX::BI__imma_m8n32k16_ld_a_s8: |
17332 | case NVPTX::BI__imma_m8n32k16_ld_a_u8: |
17333 | case NVPTX::BI__imma_m8n32k16_ld_b_s8: |
17334 | case NVPTX::BI__imma_m8n32k16_ld_b_u8: |
17335 | case NVPTX::BI__imma_m8n32k16_ld_c: |
17336 | |
17337 | case NVPTX::BI__imma_m8n8k32_ld_a_s4: |
17338 | case NVPTX::BI__imma_m8n8k32_ld_a_u4: |
17339 | case NVPTX::BI__imma_m8n8k32_ld_b_s4: |
17340 | case NVPTX::BI__imma_m8n8k32_ld_b_u4: |
17341 | case NVPTX::BI__imma_m8n8k32_ld_c: |
17342 | case NVPTX::BI__bmma_m8n8k128_ld_a_b1: |
17343 | case NVPTX::BI__bmma_m8n8k128_ld_b_b1: |
17344 | case NVPTX::BI__bmma_m8n8k128_ld_c: |
17345 | |
17346 | case NVPTX::BI__dmma_m8n8k4_ld_a: |
17347 | case NVPTX::BI__dmma_m8n8k4_ld_b: |
17348 | case NVPTX::BI__dmma_m8n8k4_ld_c: |
17349 | |
17350 | case NVPTX::BI__mma_bf16_m16n16k16_ld_a: |
17351 | case NVPTX::BI__mma_bf16_m16n16k16_ld_b: |
17352 | case NVPTX::BI__mma_bf16_m8n32k16_ld_a: |
17353 | case NVPTX::BI__mma_bf16_m8n32k16_ld_b: |
17354 | case NVPTX::BI__mma_bf16_m32n8k16_ld_a: |
17355 | case NVPTX::BI__mma_bf16_m32n8k16_ld_b: |
17356 | case NVPTX::BI__mma_tf32_m16n16k8_ld_a: |
17357 | case NVPTX::BI__mma_tf32_m16n16k8_ld_b: |
17358 | case NVPTX::BI__mma_tf32_m16n16k8_ld_c: { |
17359 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
17360 | Value *Src = EmitScalarExpr(E->getArg(1)); |
17361 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
17362 | Optional<llvm::APSInt> isColMajorArg = |
17363 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
17364 | if (!isColMajorArg) |
17365 | return nullptr; |
17366 | bool isColMajor = isColMajorArg->getSExtValue(); |
17367 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
17368 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
17369 | if (IID == 0) |
17370 | return nullptr; |
17371 | |
17372 | Value *Result = |
17373 | Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); |
17374 | |
17375 | |
17376 | assert(II.NumResults); |
17377 | if (II.NumResults == 1) { |
17378 | Builder.CreateAlignedStore(Result, Dst.getPointer(), |
17379 | CharUnits::fromQuantity(4)); |
17380 | } else { |
17381 | for (unsigned i = 0; i < II.NumResults; ++i) { |
17382 | Builder.CreateAlignedStore( |
17383 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), |
17384 | Dst.getElementType()), |
17385 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
17386 | llvm::ConstantInt::get(IntTy, i)), |
17387 | CharUnits::fromQuantity(4)); |
17388 | } |
17389 | } |
17390 | return Result; |
17391 | } |
17392 | |
17393 | case NVPTX::BI__hmma_m16n16k16_st_c_f16: |
17394 | case NVPTX::BI__hmma_m16n16k16_st_c_f32: |
17395 | case NVPTX::BI__hmma_m32n8k16_st_c_f16: |
17396 | case NVPTX::BI__hmma_m32n8k16_st_c_f32: |
17397 | case NVPTX::BI__hmma_m8n32k16_st_c_f16: |
17398 | case NVPTX::BI__hmma_m8n32k16_st_c_f32: |
17399 | case NVPTX::BI__imma_m16n16k16_st_c_i32: |
17400 | case NVPTX::BI__imma_m32n8k16_st_c_i32: |
17401 | case NVPTX::BI__imma_m8n32k16_st_c_i32: |
17402 | case NVPTX::BI__imma_m8n8k32_st_c_i32: |
17403 | case NVPTX::BI__bmma_m8n8k128_st_c_i32: |
17404 | case NVPTX::BI__dmma_m8n8k4_st_c_f64: |
17405 | case NVPTX::BI__mma_m16n16k8_st_c_f32: { |
17406 | Value *Dst = EmitScalarExpr(E->getArg(0)); |
17407 | Address Src = EmitPointerWithAlignment(E->getArg(1)); |
17408 | Value *Ldm = EmitScalarExpr(E->getArg(2)); |
17409 | Optional<llvm::APSInt> isColMajorArg = |
17410 | E->getArg(3)->getIntegerConstantExpr(getContext()); |
17411 | if (!isColMajorArg) |
17412 | return nullptr; |
17413 | bool isColMajor = isColMajorArg->getSExtValue(); |
17414 | NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID); |
17415 | unsigned IID = isColMajor ? II.IID_col : II.IID_row; |
17416 | if (IID == 0) |
17417 | return nullptr; |
17418 | Function *Intrinsic = |
17419 | CGM.getIntrinsic(IID, Dst->getType()); |
17420 | llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); |
17421 | SmallVector<Value *, 10> Values = {Dst}; |
17422 | for (unsigned i = 0; i < II.NumResults; ++i) { |
17423 | Value *V = Builder.CreateAlignedLoad( |
17424 | Src.getElementType(), |
17425 | Builder.CreateGEP(Src.getElementType(), Src.getPointer(), |
17426 | llvm::ConstantInt::get(IntTy, i)), |
17427 | CharUnits::fromQuantity(4)); |
17428 | Values.push_back(Builder.CreateBitCast(V, ParamType)); |
17429 | } |
17430 | Values.push_back(Ldm); |
17431 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
17432 | return Result; |
17433 | } |
17434 | |
17435 | |
17436 | |
17437 | case NVPTX::BI__hmma_m16n16k16_mma_f16f16: |
17438 | case NVPTX::BI__hmma_m16n16k16_mma_f32f16: |
17439 | case NVPTX::BI__hmma_m16n16k16_mma_f32f32: |
17440 | case NVPTX::BI__hmma_m16n16k16_mma_f16f32: |
17441 | case NVPTX::BI__hmma_m32n8k16_mma_f16f16: |
17442 | case NVPTX::BI__hmma_m32n8k16_mma_f32f16: |
17443 | case NVPTX::BI__hmma_m32n8k16_mma_f32f32: |
17444 | case NVPTX::BI__hmma_m32n8k16_mma_f16f32: |
17445 | case NVPTX::BI__hmma_m8n32k16_mma_f16f16: |
17446 | case NVPTX::BI__hmma_m8n32k16_mma_f32f16: |
17447 | case NVPTX::BI__hmma_m8n32k16_mma_f32f32: |
17448 | case NVPTX::BI__hmma_m8n32k16_mma_f16f32: |
17449 | case NVPTX::BI__imma_m16n16k16_mma_s8: |
17450 | case NVPTX::BI__imma_m16n16k16_mma_u8: |
17451 | case NVPTX::BI__imma_m32n8k16_mma_s8: |
17452 | case NVPTX::BI__imma_m32n8k16_mma_u8: |
17453 | case NVPTX::BI__imma_m8n32k16_mma_s8: |
17454 | case NVPTX::BI__imma_m8n32k16_mma_u8: |
17455 | case NVPTX::BI__imma_m8n8k32_mma_s4: |
17456 | case NVPTX::BI__imma_m8n8k32_mma_u4: |
17457 | case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: |
17458 | case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1: |
17459 | case NVPTX::BI__dmma_m8n8k4_mma_f64: |
17460 | case NVPTX::BI__mma_bf16_m16n16k16_mma_f32: |
17461 | case NVPTX::BI__mma_bf16_m8n32k16_mma_f32: |
17462 | case NVPTX::BI__mma_bf16_m32n8k16_mma_f32: |
17463 | case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: { |
17464 | Address Dst = EmitPointerWithAlignment(E->getArg(0)); |
17465 | Address SrcA = EmitPointerWithAlignment(E->getArg(1)); |
17466 | Address SrcB = EmitPointerWithAlignment(E->getArg(2)); |
17467 | Address SrcC = EmitPointerWithAlignment(E->getArg(3)); |
17468 | Optional<llvm::APSInt> LayoutArg = |
17469 | E->getArg(4)->getIntegerConstantExpr(getContext()); |
17470 | if (!LayoutArg) |
17471 | return nullptr; |
17472 | int Layout = LayoutArg->getSExtValue(); |
17473 | if (Layout < 0 || Layout > 3) |
17474 | return nullptr; |
17475 | llvm::APSInt SatfArg; |
17476 | if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 || |
17477 | BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1) |
17478 | SatfArg = 0; |
17479 | else if (Optional<llvm::APSInt> OptSatfArg = |
17480 | E->getArg(5)->getIntegerConstantExpr(getContext())) |
17481 | SatfArg = *OptSatfArg; |
17482 | else |
17483 | return nullptr; |
17484 | bool Satf = SatfArg.getSExtValue(); |
17485 | NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); |
17486 | unsigned IID = MI.getMMAIntrinsic(Layout, Satf); |
17487 | if (IID == 0) |
17488 | return nullptr; |
17489 | |
17490 | SmallVector<Value *, 24> Values; |
17491 | Function *Intrinsic = CGM.getIntrinsic(IID); |
17492 | llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0); |
17493 | |
17494 | for (unsigned i = 0; i < MI.NumEltsA; ++i) { |
17495 | Value *V = Builder.CreateAlignedLoad( |
17496 | SrcA.getElementType(), |
17497 | Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(), |
17498 | llvm::ConstantInt::get(IntTy, i)), |
17499 | CharUnits::fromQuantity(4)); |
17500 | Values.push_back(Builder.CreateBitCast(V, AType)); |
17501 | } |
17502 | |
17503 | llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA); |
17504 | for (unsigned i = 0; i < MI.NumEltsB; ++i) { |
17505 | Value *V = Builder.CreateAlignedLoad( |
17506 | SrcB.getElementType(), |
17507 | Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(), |
17508 | llvm::ConstantInt::get(IntTy, i)), |
17509 | CharUnits::fromQuantity(4)); |
17510 | Values.push_back(Builder.CreateBitCast(V, BType)); |
17511 | } |
17512 | |
17513 | llvm::Type *CType = |
17514 | Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB); |
17515 | for (unsigned i = 0; i < MI.NumEltsC; ++i) { |
17516 | Value *V = Builder.CreateAlignedLoad( |
17517 | SrcC.getElementType(), |
17518 | Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(), |
17519 | llvm::ConstantInt::get(IntTy, i)), |
17520 | CharUnits::fromQuantity(4)); |
17521 | Values.push_back(Builder.CreateBitCast(V, CType)); |
17522 | } |
17523 | Value *Result = Builder.CreateCall(Intrinsic, Values); |
17524 | llvm::Type *DType = Dst.getElementType(); |
17525 | for (unsigned i = 0; i < MI.NumEltsD; ++i) |
17526 | Builder.CreateAlignedStore( |
17527 | Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType), |
17528 | Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(), |
17529 | llvm::ConstantInt::get(IntTy, i)), |
17530 | CharUnits::fromQuantity(4)); |
17531 | return Result; |
17532 | } |
17533 | default: |
17534 | return nullptr; |
17535 | } |
17536 | } |
17537 | |
17538 | namespace { |
17539 | struct BuiltinAlignArgs { |
17540 | llvm::Value *Src = nullptr; |
17541 | llvm::Type *SrcType = nullptr; |
17542 | llvm::Value *Alignment = nullptr; |
17543 | llvm::Value *Mask = nullptr; |
17544 | llvm::IntegerType *IntType = nullptr; |
17545 | |
17546 | BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) { |
17547 | QualType AstType = E->getArg(0)->getType(); |
17548 | if (AstType->isArrayType()) |
17549 | Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer(); |
17550 | else |
17551 | Src = CGF.EmitScalarExpr(E->getArg(0)); |
17552 | SrcType = Src->getType(); |
17553 | if (SrcType->isPointerTy()) { |
17554 | IntType = IntegerType::get( |
17555 | CGF.getLLVMContext(), |
17556 | CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType)); |
17557 | } else { |
17558 | assert(SrcType->isIntegerTy()); |
17559 | IntType = cast<llvm::IntegerType>(SrcType); |
17560 | } |
17561 | Alignment = CGF.EmitScalarExpr(E->getArg(1)); |
17562 | Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment"); |
17563 | auto *One = llvm::ConstantInt::get(IntType, 1); |
17564 | Mask = CGF.Builder.CreateSub(Alignment, One, "mask"); |
17565 | } |
17566 | }; |
17567 | } |
17568 | |
17569 | |
17570 | RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) { |
17571 | BuiltinAlignArgs Args(E, *this); |
17572 | llvm::Value *SrcAddress = Args.Src; |
17573 | if (Args.SrcType->isPointerTy()) |
17574 | SrcAddress = |
17575 | Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr"); |
17576 | return RValue::get(Builder.CreateICmpEQ( |
17577 | Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"), |
17578 | llvm::Constant::getNullValue(Args.IntType), "is_aligned")); |
17579 | } |
17580 | |
17581 | |
17582 | |
17583 | |
17584 | |
17585 | RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) { |
17586 | BuiltinAlignArgs Args(E, *this); |
17587 | llvm::Value *SrcAddr = Args.Src; |
17588 | if (Args.Src->getType()->isPointerTy()) |
17589 | SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr"); |
17590 | llvm::Value *SrcForMask = SrcAddr; |
17591 | if (AlignUp) { |
17592 | |
17593 | |
17594 | |
17595 | |
17596 | SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary"); |
17597 | } |
17598 | |
17599 | llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask"); |
17600 | llvm::Value *Result = |
17601 | Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result"); |
17602 | if (Args.Src->getType()->isPointerTy()) { |
17603 | |
17604 | |
17605 | |
17606 | |
17607 | Result->setName("aligned_intptr"); |
17608 | llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff"); |
17609 | |
17610 | |
17611 | Value *Base = EmitCastToVoidPtr(Args.Src); |
17612 | if (getLangOpts().isSignedOverflowDefined()) |
17613 | Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result"); |
17614 | else |
17615 | Result = EmitCheckedInBoundsGEP(Base, Difference, |
17616 | true, |
17617 | !AlignUp, |
17618 | E->getExprLoc(), "aligned_result"); |
17619 | Result = Builder.CreatePointerCast(Result, Args.SrcType); |
17620 | |
17621 | |
17622 | emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment); |
17623 | } |
17624 | assert(Result->getType() == Args.SrcType); |
17625 | return RValue::get(Result); |
17626 | } |
17627 | |
17628 | Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, |
17629 | const CallExpr *E) { |
17630 | switch (BuiltinID) { |
17631 | case WebAssembly::BI__builtin_wasm_memory_size: { |
17632 | llvm::Type *ResultType = ConvertType(E->getType()); |
17633 | Value *I = EmitScalarExpr(E->getArg(0)); |
17634 | Function *Callee = |
17635 | CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType); |
17636 | return Builder.CreateCall(Callee, I); |
17637 | } |
17638 | case WebAssembly::BI__builtin_wasm_memory_grow: { |
17639 | llvm::Type *ResultType = ConvertType(E->getType()); |
17640 | Value *Args[] = {EmitScalarExpr(E->getArg(0)), |
17641 | EmitScalarExpr(E->getArg(1))}; |
17642 | Function *Callee = |
17643 | CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType); |
17644 | return Builder.CreateCall(Callee, Args); |
17645 | } |
17646 | case WebAssembly::BI__builtin_wasm_tls_size: { |
17647 | llvm::Type *ResultType = ConvertType(E->getType()); |
17648 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType); |
17649 | return Builder.CreateCall(Callee); |
17650 | } |
17651 | case WebAssembly::BI__builtin_wasm_tls_align: { |
17652 | llvm::Type *ResultType = ConvertType(E->getType()); |
17653 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType); |
17654 | return Builder.CreateCall(Callee); |
17655 | } |
17656 | case WebAssembly::BI__builtin_wasm_tls_base: { |
17657 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base); |
17658 | return Builder.CreateCall(Callee); |
17659 | } |
17660 | case WebAssembly::BI__builtin_wasm_throw: { |
17661 | Value *Tag = EmitScalarExpr(E->getArg(0)); |
17662 | Value *Obj = EmitScalarExpr(E->getArg(1)); |
17663 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw); |
17664 | return Builder.CreateCall(Callee, {Tag, Obj}); |
17665 | } |
17666 | case WebAssembly::BI__builtin_wasm_rethrow: { |
17667 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow); |
17668 | return Builder.CreateCall(Callee); |
17669 | } |
17670 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: { |
17671 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17672 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
17673 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
17674 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32); |
17675 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
17676 | } |
17677 | case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: { |
17678 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17679 | Value *Expected = EmitScalarExpr(E->getArg(1)); |
17680 | Value *Timeout = EmitScalarExpr(E->getArg(2)); |
17681 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64); |
17682 | return Builder.CreateCall(Callee, {Addr, Expected, Timeout}); |
17683 | } |
17684 | case WebAssembly::BI__builtin_wasm_memory_atomic_notify: { |
17685 | Value *Addr = EmitScalarExpr(E->getArg(0)); |
17686 | Value *Count = EmitScalarExpr(E->getArg(1)); |
17687 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify); |
17688 | return Builder.CreateCall(Callee, {Addr, Count}); |
17689 | } |
17690 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32: |
17691 | case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64: |
17692 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32: |
17693 | case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: { |
17694 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17695 | llvm::Type *ResT = ConvertType(E->getType()); |
17696 | Function *Callee = |
17697 | CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()}); |
17698 | return Builder.CreateCall(Callee, {Src}); |
17699 | } |
17700 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32: |
17701 | case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64: |
17702 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32: |
17703 | case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: { |
17704 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17705 | llvm::Type *ResT = ConvertType(E->getType()); |
17706 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned, |
17707 | {ResT, Src->getType()}); |
17708 | return Builder.CreateCall(Callee, {Src}); |
17709 | } |
17710 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32: |
17711 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: |
17712 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: |
17713 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: |
17714 | case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: { |
17715 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17716 | llvm::Type *ResT = ConvertType(E->getType()); |
17717 | Function *Callee = |
17718 | CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()}); |
17719 | return Builder.CreateCall(Callee, {Src}); |
17720 | } |
17721 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32: |
17722 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: |
17723 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: |
17724 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: |
17725 | case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: { |
17726 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17727 | llvm::Type *ResT = ConvertType(E->getType()); |
17728 | Function *Callee = |
17729 | CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()}); |
17730 | return Builder.CreateCall(Callee, {Src}); |
17731 | } |
17732 | case WebAssembly::BI__builtin_wasm_min_f32: |
17733 | case WebAssembly::BI__builtin_wasm_min_f64: |
17734 | case WebAssembly::BI__builtin_wasm_min_f32x4: |
17735 | case WebAssembly::BI__builtin_wasm_min_f64x2: { |
17736 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17737 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17738 | Function *Callee = |
17739 | CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType())); |
17740 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17741 | } |
17742 | case WebAssembly::BI__builtin_wasm_max_f32: |
17743 | case WebAssembly::BI__builtin_wasm_max_f64: |
17744 | case WebAssembly::BI__builtin_wasm_max_f32x4: |
17745 | case WebAssembly::BI__builtin_wasm_max_f64x2: { |
17746 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17747 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17748 | Function *Callee = |
17749 | CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType())); |
17750 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17751 | } |
17752 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
17753 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
17754 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
17755 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
17756 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
17757 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
17758 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
17759 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: { |
17760 | unsigned IntNo; |
17761 | switch (BuiltinID) { |
17762 | case WebAssembly::BI__builtin_wasm_ceil_f32x4: |
17763 | case WebAssembly::BI__builtin_wasm_ceil_f64x2: |
17764 | IntNo = Intrinsic::ceil; |
17765 | break; |
17766 | case WebAssembly::BI__builtin_wasm_floor_f32x4: |
17767 | case WebAssembly::BI__builtin_wasm_floor_f64x2: |
17768 | IntNo = Intrinsic::floor; |
17769 | break; |
17770 | case WebAssembly::BI__builtin_wasm_trunc_f32x4: |
17771 | case WebAssembly::BI__builtin_wasm_trunc_f64x2: |
17772 | IntNo = Intrinsic::trunc; |
17773 | break; |
17774 | case WebAssembly::BI__builtin_wasm_nearest_f32x4: |
17775 | case WebAssembly::BI__builtin_wasm_nearest_f64x2: |
17776 | IntNo = Intrinsic::nearbyint; |
17777 | break; |
17778 | default: |
17779 | llvm_unreachable("unexpected builtin ID"); |
17780 | } |
17781 | Value *Value = EmitScalarExpr(E->getArg(0)); |
17782 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
17783 | return Builder.CreateCall(Callee, Value); |
17784 | } |
17785 | case WebAssembly::BI__builtin_wasm_swizzle_i8x16: { |
17786 | Value *Src = EmitScalarExpr(E->getArg(0)); |
17787 | Value *Indices = EmitScalarExpr(E->getArg(1)); |
17788 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle); |
17789 | return Builder.CreateCall(Callee, {Src, Indices}); |
17790 | } |
17791 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
17792 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
17793 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
17794 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
17795 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
17796 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
17797 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
17798 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: { |
17799 | unsigned IntNo; |
17800 | switch (BuiltinID) { |
17801 | case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16: |
17802 | case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8: |
17803 | IntNo = Intrinsic::sadd_sat; |
17804 | break; |
17805 | case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16: |
17806 | case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8: |
17807 | IntNo = Intrinsic::uadd_sat; |
17808 | break; |
17809 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16: |
17810 | case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8: |
17811 | IntNo = Intrinsic::wasm_sub_sat_signed; |
17812 | break; |
17813 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16: |
17814 | case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: |
17815 | IntNo = Intrinsic::wasm_sub_sat_unsigned; |
17816 | break; |
17817 | default: |
17818 | llvm_unreachable("unexpected builtin ID"); |
17819 | } |
17820 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17821 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17822 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
17823 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17824 | } |
17825 | case WebAssembly::BI__builtin_wasm_abs_i8x16: |
17826 | case WebAssembly::BI__builtin_wasm_abs_i16x8: |
17827 | case WebAssembly::BI__builtin_wasm_abs_i32x4: |
17828 | case WebAssembly::BI__builtin_wasm_abs_i64x2: { |
17829 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17830 | Value *Neg = Builder.CreateNeg(Vec, "neg"); |
17831 | Constant *Zero = llvm::Constant::getNullValue(Vec->getType()); |
17832 | Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond"); |
17833 | return Builder.CreateSelect(ICmp, Neg, Vec, "abs"); |
17834 | } |
17835 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
17836 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
17837 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
17838 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
17839 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
17840 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
17841 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
17842 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
17843 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
17844 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
17845 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
17846 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: { |
17847 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17848 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17849 | Value *ICmp; |
17850 | switch (BuiltinID) { |
17851 | case WebAssembly::BI__builtin_wasm_min_s_i8x16: |
17852 | case WebAssembly::BI__builtin_wasm_min_s_i16x8: |
17853 | case WebAssembly::BI__builtin_wasm_min_s_i32x4: |
17854 | ICmp = Builder.CreateICmpSLT(LHS, RHS); |
17855 | break; |
17856 | case WebAssembly::BI__builtin_wasm_min_u_i8x16: |
17857 | case WebAssembly::BI__builtin_wasm_min_u_i16x8: |
17858 | case WebAssembly::BI__builtin_wasm_min_u_i32x4: |
17859 | ICmp = Builder.CreateICmpULT(LHS, RHS); |
17860 | break; |
17861 | case WebAssembly::BI__builtin_wasm_max_s_i8x16: |
17862 | case WebAssembly::BI__builtin_wasm_max_s_i16x8: |
17863 | case WebAssembly::BI__builtin_wasm_max_s_i32x4: |
17864 | ICmp = Builder.CreateICmpSGT(LHS, RHS); |
17865 | break; |
17866 | case WebAssembly::BI__builtin_wasm_max_u_i8x16: |
17867 | case WebAssembly::BI__builtin_wasm_max_u_i16x8: |
17868 | case WebAssembly::BI__builtin_wasm_max_u_i32x4: |
17869 | ICmp = Builder.CreateICmpUGT(LHS, RHS); |
17870 | break; |
17871 | default: |
17872 | llvm_unreachable("unexpected builtin ID"); |
17873 | } |
17874 | return Builder.CreateSelect(ICmp, LHS, RHS); |
17875 | } |
17876 | case WebAssembly::BI__builtin_wasm_avgr_u_i8x16: |
17877 | case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: { |
17878 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17879 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17880 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned, |
17881 | ConvertType(E->getType())); |
17882 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17883 | } |
17884 | case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: { |
17885 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17886 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17887 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed); |
17888 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17889 | } |
17890 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
17891 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
17892 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
17893 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: { |
17894 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17895 | unsigned IntNo; |
17896 | switch (BuiltinID) { |
17897 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8: |
17898 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4: |
17899 | IntNo = Intrinsic::wasm_extadd_pairwise_signed; |
17900 | break; |
17901 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8: |
17902 | case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: |
17903 | IntNo = Intrinsic::wasm_extadd_pairwise_unsigned; |
17904 | break; |
17905 | default: |
17906 | llvm_unreachable("unexptected builtin ID"); |
17907 | } |
17908 | |
17909 | Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); |
17910 | return Builder.CreateCall(Callee, Vec); |
17911 | } |
17912 | case WebAssembly::BI__builtin_wasm_bitselect: { |
17913 | Value *V1 = EmitScalarExpr(E->getArg(0)); |
17914 | Value *V2 = EmitScalarExpr(E->getArg(1)); |
17915 | Value *C = EmitScalarExpr(E->getArg(2)); |
17916 | Function *Callee = |
17917 | CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType())); |
17918 | return Builder.CreateCall(Callee, {V1, V2, C}); |
17919 | } |
17920 | case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: { |
17921 | Value *LHS = EmitScalarExpr(E->getArg(0)); |
17922 | Value *RHS = EmitScalarExpr(E->getArg(1)); |
17923 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot); |
17924 | return Builder.CreateCall(Callee, {LHS, RHS}); |
17925 | } |
17926 | case WebAssembly::BI__builtin_wasm_popcnt_i8x16: { |
17927 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17928 | Function *Callee = |
17929 | CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType())); |
17930 | return Builder.CreateCall(Callee, {Vec}); |
17931 | } |
17932 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
17933 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
17934 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
17935 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
17936 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: { |
17937 | unsigned IntNo; |
17938 | switch (BuiltinID) { |
17939 | case WebAssembly::BI__builtin_wasm_any_true_v128: |
17940 | IntNo = Intrinsic::wasm_anytrue; |
17941 | break; |
17942 | case WebAssembly::BI__builtin_wasm_all_true_i8x16: |
17943 | case WebAssembly::BI__builtin_wasm_all_true_i16x8: |
17944 | case WebAssembly::BI__builtin_wasm_all_true_i32x4: |
17945 | case WebAssembly::BI__builtin_wasm_all_true_i64x2: |
17946 | IntNo = Intrinsic::wasm_alltrue; |
17947 | break; |
17948 | default: |
17949 | llvm_unreachable("unexpected builtin ID"); |
17950 | } |
17951 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17952 | Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType()); |
17953 | return Builder.CreateCall(Callee, {Vec}); |
17954 | } |
17955 | case WebAssembly::BI__builtin_wasm_bitmask_i8x16: |
17956 | case WebAssembly::BI__builtin_wasm_bitmask_i16x8: |
17957 | case WebAssembly::BI__builtin_wasm_bitmask_i32x4: |
17958 | case WebAssembly::BI__builtin_wasm_bitmask_i64x2: { |
17959 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17960 | Function *Callee = |
17961 | CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType()); |
17962 | return Builder.CreateCall(Callee, {Vec}); |
17963 | } |
17964 | case WebAssembly::BI__builtin_wasm_abs_f32x4: |
17965 | case WebAssembly::BI__builtin_wasm_abs_f64x2: { |
17966 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17967 | Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType()); |
17968 | return Builder.CreateCall(Callee, {Vec}); |
17969 | } |
17970 | case WebAssembly::BI__builtin_wasm_sqrt_f32x4: |
17971 | case WebAssembly::BI__builtin_wasm_sqrt_f64x2: { |
17972 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
17973 | Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType()); |
17974 | return Builder.CreateCall(Callee, {Vec}); |
17975 | } |
17976 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
17977 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
17978 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
17979 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: { |
17980 | Value *Low = EmitScalarExpr(E->getArg(0)); |
17981 | Value *High = EmitScalarExpr(E->getArg(1)); |
17982 | unsigned IntNo; |
17983 | switch (BuiltinID) { |
17984 | case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8: |
17985 | case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4: |
17986 | IntNo = Intrinsic::wasm_narrow_signed; |
17987 | break; |
17988 | case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8: |
17989 | case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: |
17990 | IntNo = Intrinsic::wasm_narrow_unsigned; |
17991 | break; |
17992 | default: |
17993 | llvm_unreachable("unexpected builtin ID"); |
17994 | } |
17995 | Function *Callee = |
17996 | CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()}); |
17997 | return Builder.CreateCall(Callee, {Low, High}); |
17998 | } |
17999 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4: |
18000 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: { |
18001 | Value *Vec = EmitScalarExpr(E->getArg(0)); |
18002 | unsigned IntNo; |
18003 | switch (BuiltinID) { |
18004 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4: |
18005 | IntNo = Intrinsic::fptosi_sat; |
18006 | break; |
18007 | case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: |
18008 | IntNo = Intrinsic::fptoui_sat; |
18009 | break; |
18010 | default: |
18011 | llvm_unreachable("unexpected builtin ID"); |
18012 | } |
18013 | llvm::Type *SrcT = Vec->getType(); |
18014 | llvm::Type *TruncT = |
18015 | SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32)); |
18016 | Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT}); |
18017 | Value *Trunc = Builder.CreateCall(Callee, Vec); |
18018 | Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0)); |
18019 | Value *ConcatMask = |
18020 | llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1), |
18021 | Builder.getInt32(2), Builder.getInt32(3)}); |
18022 | return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask); |
18023 | } |
18024 | case WebAssembly::BI__builtin_wasm_shuffle_i8x16: { |
18025 | Value *Ops[18]; |
18026 | size_t OpIdx = 0; |
18027 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(0)); |
18028 | Ops[OpIdx++] = EmitScalarExpr(E->getArg(1)); |
18029 | while (OpIdx < 18) { |
18030 | Optional<llvm::APSInt> LaneConst = |
18031 | E->getArg(OpIdx)->getIntegerConstantExpr(getContext()); |
18032 | assert(LaneConst && "Constant arg isn't actually constant?"); |
18033 | Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst); |
18034 | } |
18035 | Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle); |
18036 | return Builder.CreateCall(Callee, Ops); |
18037 | } |
18038 | default: |
18039 | return nullptr; |
18040 | } |
18041 | } |
18042 | |
18043 | static std::pair<Intrinsic::ID, unsigned> |
18044 | getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) { |
18045 | struct Info { |
18046 | unsigned BuiltinID; |
18047 | Intrinsic::ID IntrinsicID; |
18048 | unsigned VecLen; |
18049 | }; |
18050 | Info Infos[] = { |
18051 | #define CUSTOM_BUILTIN_MAPPING(x,s) \ |
18052 | { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s }, |
18053 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) |
18054 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0) |
18055 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0) |
18056 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) |
18057 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) |
18058 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0) |
18059 | CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) |
18060 | CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) |
18061 | CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0) |
18062 | CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) |
18063 | CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0) |
18064 | CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) |
18065 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0) |
18066 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) |
18067 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) |
18068 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) |
18069 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) |
18070 | CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) |
18071 | CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0) |
18072 | CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0) |
18073 | CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0) |
18074 | CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0) |
18075 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) |
18076 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64) |
18077 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) |
18078 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) |
18079 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) |
18080 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) |
18081 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) |
18082 | CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) |
18083 | #include "clang/Basic/BuiltinsHexagonMapCustomDep.def" |
18084 | #undef CUSTOM_BUILTIN_MAPPING |
18085 | }; |
18086 | |
18087 | auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; |
18088 | static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true); |
18089 | (void)SortOnce; |
18090 | |
18091 | const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos), |
18092 | Info{BuiltinID, 0, 0}, CmpInfo); |
18093 | if (F == std::end(Infos) || F->BuiltinID != BuiltinID) |
18094 | return {Intrinsic::not_intrinsic, 0}; |
18095 | |
18096 | return {F->IntrinsicID, F->VecLen}; |
18097 | } |
18098 | |
18099 | Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID, |
18100 | const CallExpr *E) { |
18101 | Intrinsic::ID ID; |
18102 | unsigned VecLen; |
18103 | std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID); |
18104 | |
18105 | auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { |
18106 | |
18107 | Address A = EmitPointerWithAlignment(E->getArg(0)); |
18108 | Address BP = Address( |
18109 | Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment()); |
18110 | llvm::Value *Base = Builder.CreateLoad(BP); |
18111 | |
18112 | |
18113 | |
18114 | |
18115 | |
18116 | |
18117 | |
18118 | |
18119 | SmallVector<llvm::Value*,5> Ops = { Base }; |
18120 | for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) |
18121 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18122 | |
18123 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); |
18124 | |
18125 | |
18126 | llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1) |
18127 | : Result; |
18128 | llvm::Value *LV = Builder.CreateBitCast( |
18129 | EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo()); |
18130 | Address Dest = EmitPointerWithAlignment(E->getArg(0)); |
18131 | llvm::Value *RetVal = |
18132 | Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); |
18133 | if (IsLoad) |
18134 | RetVal = Builder.CreateExtractValue(Result, 0); |
18135 | return RetVal; |
18136 | }; |
18137 | |
18138 | |
18139 | |
18140 | |
18141 | auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) { |
18142 | |
18143 | |
18144 | |
18145 | llvm::Value *BaseAddress = |
18146 | Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy); |
18147 | |
18148 | |
18149 | |
18150 | |
18151 | Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); |
18152 | DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy), |
18153 | DestAddr.getAlignment()); |
18154 | llvm::Value *DestAddress = DestAddr.getPointer(); |
18155 | |
18156 | |
18157 | |
18158 | |
18159 | llvm::Value *Result = Builder.CreateCall( |
18160 | CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))}); |
18161 | |
18162 | |
18163 | llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0); |
18164 | |
18165 | |
18166 | |
18167 | |
18168 | DestVal = Builder.CreateTrunc(DestVal, DestTy); |
18169 | |
18170 | llvm::Value *DestForStore = |
18171 | Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo()); |
18172 | Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment()); |
18173 | |
18174 | return Builder.CreateExtractValue(Result, 1); |
18175 | }; |
18176 | |
18177 | auto V2Q = [this, VecLen] (llvm::Value *Vec) { |
18178 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B |
18179 | : Intrinsic::hexagon_V6_vandvrt; |
18180 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18181 | {Vec, Builder.getInt32(-1)}); |
18182 | }; |
18183 | auto Q2V = [this, VecLen] (llvm::Value *Pred) { |
18184 | Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B |
18185 | : Intrinsic::hexagon_V6_vandqrt; |
18186 | return Builder.CreateCall(CGM.getIntrinsic(ID), |
18187 | {Pred, Builder.getInt32(-1)}); |
18188 | }; |
18189 | |
18190 | switch (BuiltinID) { |
18191 | |
18192 | |
18193 | |
18194 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry: |
18195 | case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: |
18196 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry: |
18197 | case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: { |
18198 | |
18199 | llvm::Type *VecType = ConvertType(E->getArg(0)->getType()); |
18200 | Address PredAddr = Builder.CreateBitCast( |
18201 | EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0)); |
18202 | llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr)); |
18203 | llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), |
18204 | {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn}); |
18205 | |
18206 | llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1); |
18207 | Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(), |
18208 | PredAddr.getAlignment()); |
18209 | return Builder.CreateExtractValue(Result, 0); |
18210 | } |
18211 | |
18212 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci: |
18213 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci: |
18214 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci: |
18215 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci: |
18216 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci: |
18217 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci: |
18218 | case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr: |
18219 | case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr: |
18220 | case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr: |
18221 | case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr: |
18222 | case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr: |
18223 | case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr: |
18224 | return MakeCircOp(ID, true); |
18225 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci: |
18226 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci: |
18227 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci: |
18228 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci: |
18229 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci: |
18230 | case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr: |
18231 | case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr: |
18232 | case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr: |
18233 | case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr: |
18234 | case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr: |
18235 | return MakeCircOp(ID, false); |
18236 | case Hexagon::BI__builtin_brev_ldub: |
18237 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty); |
18238 | case Hexagon::BI__builtin_brev_ldb: |
18239 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty); |
18240 | case Hexagon::BI__builtin_brev_lduh: |
18241 | return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty); |
18242 | case Hexagon::BI__builtin_brev_ldh: |
18243 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty); |
18244 | case Hexagon::BI__builtin_brev_ldw: |
18245 | return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty); |
18246 | case Hexagon::BI__builtin_brev_ldd: |
18247 | return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty); |
18248 | |
18249 | default: { |
18250 | if (ID == Intrinsic::not_intrinsic) |
18251 | return nullptr; |
18252 | |
18253 | auto IsVectorPredTy = [](llvm::Type *T) { |
18254 | return T->isVectorTy() && |
18255 | cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1); |
18256 | }; |
18257 | |
18258 | llvm::Function *IntrFn = CGM.getIntrinsic(ID); |
18259 | llvm::FunctionType *IntrTy = IntrFn->getFunctionType(); |
18260 | SmallVector<llvm::Value*,4> Ops; |
18261 | for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) { |
18262 | llvm::Type *T = IntrTy->getParamType(i); |
18263 | const Expr *A = E->getArg(i); |
18264 | if (IsVectorPredTy(T)) { |
18265 | |
18266 | if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) { |
18267 | if (Cast->getCastKind() == CK_BitCast) |
18268 | A = Cast->getSubExpr(); |
18269 | } |
18270 | Ops.push_back(V2Q(EmitScalarExpr(A))); |
18271 | } else { |
18272 | Ops.push_back(EmitScalarExpr(A)); |
18273 | } |
18274 | } |
18275 | |
18276 | llvm::Value *Call = Builder.CreateCall(IntrFn, Ops); |
18277 | if (IsVectorPredTy(IntrTy->getReturnType())) |
18278 | Call = Q2V(Call); |
18279 | |
18280 | return Call; |
18281 | } |
18282 | } |
18283 | |
18284 | return nullptr; |
18285 | } |
18286 | |
18287 | Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID, |
18288 | const CallExpr *E, |
18289 | ReturnValueSlot ReturnValue) { |
18290 | SmallVector<Value *, 4> Ops; |
18291 | llvm::Type *ResultType = ConvertType(E->getType()); |
18292 | |
18293 | for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) |
18294 | Ops.push_back(EmitScalarExpr(E->getArg(i))); |
18295 | |
18296 | Intrinsic::ID ID = Intrinsic::not_intrinsic; |
18297 | unsigned NF = 1; |
18298 | |
18299 | |
18300 | llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes; |
18301 | switch (BuiltinID) { |
18302 | default: llvm_unreachable("unexpected builtin ID"); |
18303 | case RISCV::BI__builtin_riscv_orc_b_32: |
18304 | case RISCV::BI__builtin_riscv_orc_b_64: |
18305 | case RISCV::BI__builtin_riscv_clmul: |
18306 | case RISCV::BI__builtin_riscv_clmulh: |
18307 | case RISCV::BI__builtin_riscv_clmulr: |
18308 | case RISCV::BI__builtin_riscv_bcompress_32: |
18309 | case RISCV::BI__builtin_riscv_bcompress_64: |
18310 | case RISCV::BI__builtin_riscv_bdecompress_32: |
18311 | case RISCV::BI__builtin_riscv_bdecompress_64: |
18312 | case RISCV::BI__builtin_riscv_grev_32: |
18313 | case RISCV::BI__builtin_riscv_grev_64: |
18314 | case RISCV::BI__builtin_riscv_gorc_32: |
18315 | case RISCV::BI__builtin_riscv_gorc_64: |
18316 | case RISCV::BI__builtin_riscv_shfl_32: |
18317 | case RISCV::BI__builtin_riscv_shfl_64: |
18318 | case RISCV::BI__builtin_riscv_unshfl_32: |
18319 | case RISCV::BI__builtin_riscv_unshfl_64: |
18320 | case RISCV::BI__builtin_riscv_xperm_n: |
18321 | case RISCV::BI__builtin_riscv_xperm_b: |
18322 | case RISCV::BI__builtin_riscv_xperm_h: |
18323 | case RISCV::BI__builtin_riscv_xperm_w: |
18324 | case RISCV::BI__builtin_riscv_crc32_b: |
18325 | case RISCV::BI__builtin_riscv_crc32_h: |
18326 | case RISCV::BI__builtin_riscv_crc32_w: |
18327 | case RISCV::BI__builtin_riscv_crc32_d: |
18328 | case RISCV::BI__builtin_riscv_crc32c_b: |
18329 | case RISCV::BI__builtin_riscv_crc32c_h: |
18330 | case RISCV::BI__builtin_riscv_crc32c_w: |
18331 | case RISCV::BI__builtin_riscv_crc32c_d: { |
18332 | switch (BuiltinID) { |
18333 | default: llvm_unreachable("unexpected builtin ID"); |
18334 | |
18335 | case RISCV::BI__builtin_riscv_orc_b_32: |
18336 | case RISCV::BI__builtin_riscv_orc_b_64: |
18337 | ID = Intrinsic::riscv_orc_b; |
18338 | break; |
18339 | |
18340 | |
18341 | case RISCV::BI__builtin_riscv_clmul: |
18342 | ID = Intrinsic::riscv_clmul; |
18343 | break; |
18344 | case RISCV::BI__builtin_riscv_clmulh: |
18345 | ID = Intrinsic::riscv_clmulh; |
18346 | break; |
18347 | case RISCV::BI__builtin_riscv_clmulr: |
18348 | ID = Intrinsic::riscv_clmulr; |
18349 | break; |
18350 | |
18351 | |
18352 | case RISCV::BI__builtin_riscv_bcompress_32: |
18353 | case RISCV::BI__builtin_riscv_bcompress_64: |
18354 | ID = Intrinsic::riscv_bcompress; |
18355 | break; |
18356 | case RISCV::BI__builtin_riscv_bdecompress_32: |
18357 | case RISCV::BI__builtin_riscv_bdecompress_64: |
18358 | ID = Intrinsic::riscv_bdecompress; |
18359 | break; |
18360 | |
18361 | |
18362 | case RISCV::BI__builtin_riscv_grev_32: |
18363 | case RISCV::BI__builtin_riscv_grev_64: |
18364 | ID = Intrinsic::riscv_grev; |
18365 | break; |
18366 | case RISCV::BI__builtin_riscv_gorc_32: |
18367 | case RISCV::BI__builtin_riscv_gorc_64: |
18368 | ID = Intrinsic::riscv_gorc; |
18369 | break; |
18370 | case RISCV::BI__builtin_riscv_shfl_32: |
18371 | case RISCV::BI__builtin_riscv_shfl_64: |
18372 | ID = Intrinsic::riscv_shfl; |
18373 | break; |
18374 | case RISCV::BI__builtin_riscv_unshfl_32: |
18375 | case RISCV::BI__builtin_riscv_unshfl_64: |
18376 | ID = Intrinsic::riscv_unshfl; |
18377 | break; |
18378 | case RISCV::BI__builtin_riscv_xperm_n: |
18379 | ID = Intrinsic::riscv_xperm_n; |
18380 | break; |
18381 | case RISCV::BI__builtin_riscv_xperm_b: |
18382 | ID = Intrinsic::riscv_xperm_b; |
18383 | break; |
18384 | case RISCV::BI__builtin_riscv_xperm_h: |
18385 | ID = Intrinsic::riscv_xperm_h; |
18386 | break; |
18387 | case RISCV::BI__builtin_riscv_xperm_w: |
18388 | ID = Intrinsic::riscv_xperm_w; |
18389 | break; |
18390 | |
18391 | |
18392 | case RISCV::BI__builtin_riscv_crc32_b: |
18393 | ID = Intrinsic::riscv_crc32_b; |
18394 | break; |
18395 | case RISCV::BI__builtin_riscv_crc32_h: |
18396 | ID = Intrinsic::riscv_crc32_h; |
18397 | break; |
18398 | case RISCV::BI__builtin_riscv_crc32_w: |
18399 | ID = Intrinsic::riscv_crc32_w; |
18400 | break; |
18401 | case RISCV::BI__builtin_riscv_crc32_d: |
18402 | ID = Intrinsic::riscv_crc32_d; |
18403 | break; |
18404 | case RISCV::BI__builtin_riscv_crc32c_b: |
18405 | ID = Intrinsic::riscv_crc32c_b; |
18406 | break; |
18407 | case RISCV::BI__builtin_riscv_crc32c_h: |
18408 | ID = Intrinsic::riscv_crc32c_h; |
18409 | break; |
18410 | case RISCV::BI__builtin_riscv_crc32c_w: |
18411 | ID = Intrinsic::riscv_crc32c_w; |
18412 | break; |
18413 | case RISCV::BI__builtin_riscv_crc32c_d: |
18414 | ID = Intrinsic::riscv_crc32c_d; |
18415 | break; |
18416 | } |
18417 | |
18418 | IntrinsicTypes = {ResultType}; |
18419 | break; |
18420 | } |
18421 | |
18422 | #include "clang/Basic/riscv_vector_builtin_cg.inc" |
18423 | } |
18424 | |
18425 | assert(ID != Intrinsic::not_intrinsic); |
18426 | |
18427 | llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes); |
18428 | return Builder.CreateCall(F, Ops, ""); |
18429 | } |