//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code to emit x86/x86_64 builtin calls as CIR or a
// function call to be later resolved.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "clang/Basic/TargetBuiltins.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/ValueRange.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <numeric>
#include <string>

using namespace clang;
using namespace clang::CIRGen;

// Classic codegen (OG) supports unordered floating-point comparisons as a
// form of optimization, in addition to ordered comparisons, while CIR does
// not.
//
// This means that we can't encode a comparison code such as UGT (unordered
// greater than), at least not at the CIR level.
//
// The boolean shouldInvert compensates for this.
// For example, to get the comparison code UGT, we pass in
// emitVectorFCmp(OLE, shouldInvert = true), since OLE is the inverse of UGT.

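// Concretely, for a lane where ops[0] is NaN: OLE(NaN, x) is false, because
// ordered comparisons are false whenever an operand is NaN; inverting gives
// true, which is exactly UGT(NaN, x), since unordered predicates hold
// whenever the operands are unordered.
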
// There are several other ways this could be supported:
// - Register extra CmpOpKinds for the unordered comparison types and add the
//   translation code to go from CIR to the LLVM dialect. Note that we get
//   this naturally with shouldInvert, benefiting from existing
//   infrastructure, albeit at the cost of an extra `not` at the CIR level.
// - Add the extra comparison codes to a new VecCmpOpKind instead of
//   cluttering CmpOpKind.
// - Add a boolean to VecCmpOp to indicate whether it's doing an unordered or
//   ordered comparison.
// - Just emit the intrinsic call instead of calling this helper; see how the
//   LLVM lowering handles this.
static mlir::Value emitVectorFCmp(CIRGenBuilderTy &builder,
                                  const SmallVector<mlir::Value> &ops,
                                  mlir::Location loc, cir::CmpOpKind pred,
                                  bool shouldInvert) {
  // TODO(cir): Add isSignaling boolean once emitConstrainedFPCall implemented
  mlir::Value cmp = builder.createVecCompare(loc, pred, ops[0], ops[1]);
  mlir::Value bitCast = builder.createBitcast(
      shouldInvert ? builder.createNot(cmp) : cmp, ops[0].getType());
  return bitCast;
}

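// Converts an integer AVX-512 mask (e.g. an i8 or i16 __mmask value) into a
// vector of i1 for use with masked vector operations. For example, an i8
// mask paired with a 4-element vector is bitcast to <8 x i1> and then
// shuffled down to <4 x i1> by keeping elements 0..3.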
static mlir::Value getMaskVecValue(CIRGenBuilderTy &builder, mlir::Location loc,
                                   mlir::Value mask, unsigned numElems) {
  auto maskTy = cir::VectorType::get(
      builder.getSIntNTy(1), cast<cir::IntType>(mask.getType()).getWidth());
  mlir::Value maskVec = builder.createBitcast(mask, maskTy);

  // If we have fewer than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (numElems < 8) {
    llvm::SmallVector<mlir::Attribute, 4> indices;
    mlir::Type i32Ty = builder.getSInt32Ty();
    for (auto i : llvm::seq<unsigned>(0, numElems))
      indices.push_back(cir::IntAttr::get(i32Ty, i));

    maskVec = builder.createVecShuffle(loc, maskVec, maskVec, indices);
  }
  return maskVec;
}

// Builds the VecShuffleOp for the pshuflw and pshufhw x86 builtins.
//
// The vector is split into lanes of 8 word elements (16 bits). The lower or
// upper half of each lane, controlled by `isLow`, is shuffled in the following
// way: the immediate is truncated to 8 bits and split into four 2-bit fields.
// The i-th field's value is the resulting index of the i-th element in the
// half lane after shuffling. The other half of the lane remains unchanged.
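//
// For example, pshuflw with an immediate of 0x1B (0b00011011) on a v8i16
// reverses the low half: the 2-bit fields decode (LSB first) to 3, 2, 1, 0,
// giving shuffle indices {3, 2, 1, 0, 4, 5, 6, 7}.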
static cir::VecShuffleOp emitPshufWord(CIRGenBuilderTy &builder,
                                       const mlir::Value vec,
                                       const mlir::Value immediate,
                                       const mlir::Location loc,
                                       const bool isLow) {
  uint32_t imm = CIRGenFunction::getZExtIntValueFromConstOp(immediate);

  auto vecTy = cast<cir::VectorType>(vec.getType());
  unsigned numElts = vecTy.getSize();

  unsigned firstHalfStart = isLow ? 0 : 4;
  unsigned secondHalfStart = 4 - firstHalfStart;

  // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
  imm = (imm & 0xff) * 0x01010101;

  int64_t indices[32];
  for (unsigned l = 0; l != numElts; l += 8) {
    for (unsigned i = firstHalfStart; i != firstHalfStart + 4; ++i) {
      indices[l + i] = l + (imm & 3) + firstHalfStart;
      imm >>= 2;
    }
    for (unsigned i = secondHalfStart; i != secondHalfStart + 4; ++i)
      indices[l + i] = l + i;
  }

  return builder.createVecShuffle(loc, vec, ArrayRef(indices, numElts));
}

// Builds the shuffle mask for the pshufd and shufpd/shufps x86 builtins.
// The shuffle mask is written to outIndices.
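//
// For example, pshufd on a v4i32 with imm = 0x1B decodes the 2-bit fields to
// {3, 2, 1, 0}, reversing the vector. For shufps (isShufP), the upper half of
// each lane selects from the second source, so imm = 0x44 yields {0, 1, 4, 5}.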
static void
computeFullLaneShuffleMask(CIRGenFunction &cgf, const mlir::Value vec,
                           uint32_t imm, const bool isShufP,
                           llvm::SmallVectorImpl<int64_t> &outIndices) {
  auto vecTy = cast<cir::VectorType>(vec.getType());
  unsigned numElts = vecTy.getSize();
  unsigned numLanes = cgf.cgm.getDataLayout().getTypeSizeInBits(vecTy) / 128;
  unsigned numLaneElts = numElts / numLanes;

  // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
  imm = (imm & 0xff) * 0x01010101;

  // Size the output before writing the mask into it below.
  outIndices.resize(numElts);

  for (unsigned l = 0; l != numElts; l += numLaneElts) {
    for (unsigned i = 0; i != numLaneElts; ++i) {
      uint32_t idx = imm % numLaneElts;
      imm /= numLaneElts;
      if (isShufP && i >= (numLaneElts / 2))
        idx += numElts;
      outIndices[l + i] = l + idx;
    }
  }
}

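// Emits cir.prefetch for the x86 prefetch builtins. For _mm_prefetch, the
// hint operand encodes the write bit and locality, e.g. _MM_HINT_T0 (3)
// decodes to a read prefetch with locality 3 and _MM_HINT_ET0 (7) to a write
// prefetch with locality 3. _m_prefetchw is always a write prefetch with
// maximum locality.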
static mlir::Value emitPrefetch(CIRGenFunction &cgf, unsigned builtinID,
                                const CallExpr *e,
                                const SmallVector<mlir::Value> &ops) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location location = cgf.getLoc(e->getExprLoc());
  mlir::Type voidTy = builder.getVoidTy();
  mlir::Value address = builder.createPtrBitcast(ops[0], voidTy);
  bool isWrite{};
  int locality{};

  assert((builtinID == X86::BI_mm_prefetch ||
          builtinID == X86::BI_m_prefetchw ||
          builtinID == X86::BI_m_prefetch) &&
         "Expected prefetch builtin");

  if (builtinID == X86::BI_mm_prefetch) {
    int hint = cgf.getSExtIntValueFromConstOp(ops[1]);
    isWrite = (hint >> 2) & 0x1;
    locality = hint & 0x3;
  } else {
    isWrite = (builtinID == X86::BI_m_prefetchw);
    locality = 0x3;
  }

  cir::PrefetchOp::create(builder, location, address, locality, isWrite);
  return {};
}

static mlir::Value emitX86CompressExpand(CIRGenBuilderTy &builder,
                                         mlir::Location loc, mlir::Value source,
                                         mlir::Value mask,
                                         mlir::Value inputVector,
                                         const std::string &id) {
  auto resultTy = cast<cir::VectorType>(mask.getType());
  mlir::Value maskValue =
      getMaskVecValue(builder, loc, inputVector, resultTy.getSize());
  return builder.emitIntrinsicCallOp(loc, id, resultTy,
                                     mlir::ValueRange{source, mask, maskValue});
}

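// Emits the element-wise select underlying the AVX-512 _mask/_maskz builtin
// variants: result[i] = mask[i] ? op0[i] : op1[i], where op1 is the
// passthrough (or zero) vector.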
static mlir::Value emitX86Select(CIRGenBuilderTy &builder, mlir::Location loc,
                                 mlir::Value mask, mlir::Value op0,
                                 mlir::Value op1) {
  auto constOp = mlir::dyn_cast_or_null<cir::ConstantOp>(mask.getDefiningOp());
  // If the mask is all ones, just return the first argument.
  if (constOp && constOp.isAllOnesValue())
    return op0;

  mask = getMaskVecValue(builder, loc, mask,
                         cast<cir::VectorType>(op0.getType()).getSize());

  return cir::VecTernaryOp::create(builder, loc, mask, op0, op1);
}

static mlir::Value emitX86MaskAddLogic(CIRGenBuilderTy &builder,
                                       mlir::Location loc,
                                       const std::string &intrinsicName,
                                       const SmallVector<mlir::Value> &ops) {
  auto intTy = cast<cir::IntType>(ops[0].getType());
  unsigned numElts = intTy.getWidth();
  mlir::Value lhsVec = getMaskVecValue(builder, loc, ops[0], numElts);
  mlir::Value rhsVec = getMaskVecValue(builder, loc, ops[1], numElts);
  mlir::Type vecTy = lhsVec.getType();
  mlir::Value resVec = builder.emitIntrinsicCallOp(
      loc, intrinsicName, vecTy, mlir::ValueRange{lhsVec, rhsVec});
  return builder.createBitcast(resVec, ops[0].getType());
}

static mlir::Value emitX86MaskUnpack(CIRGenBuilderTy &builder,
                                     mlir::Location loc,
                                     const std::string &intrinsicName,
                                     const SmallVector<mlir::Value> &ops) {
  unsigned numElems = cast<cir::IntType>(ops[0].getType()).getWidth();

  // Convert both operands to mask vectors.
  mlir::Value lhs = getMaskVecValue(builder, loc, ops[0], numElems);
  mlir::Value rhs = getMaskVecValue(builder, loc, ops[1], numElems);

  mlir::Type i32Ty = builder.getSInt32Ty();

  // Create indices for extracting the first half of each vector.
  llvm::SmallVector<mlir::Attribute, 8> halfIndices;
  for (auto i : llvm::seq<unsigned>(0, numElems / 2))
    halfIndices.push_back(cir::IntAttr::get(i32Ty, i));

  // Extract the first half of each vector. This gives better codegen than
  // doing it in a single shuffle.
  mlir::Value lhsHalf = builder.createVecShuffle(loc, lhs, lhs, halfIndices);
  mlir::Value rhsHalf = builder.createVecShuffle(loc, rhs, rhs, halfIndices);

  // Create indices for concatenating the vectors.
  // NOTE: Operands are swapped to match the intrinsic definition.
  // After the half extraction, both vectors have numElems/2 elements.
  // In createVecShuffle(rhsHalf, lhsHalf, indices), indices [0..numElems/2-1]
  // select from rhsHalf, and indices [numElems/2..numElems-1] select from
  // lhsHalf.
  llvm::SmallVector<mlir::Attribute, 8> concatIndices;
  for (auto i : llvm::seq<unsigned>(0, numElems))
    concatIndices.push_back(cir::IntAttr::get(i32Ty, i));

  // Concat the vectors (RHS first, then LHS).
  mlir::Value res =
      builder.createVecShuffle(loc, rhsHalf, lhsHalf, concatIndices);
  return builder.createBitcast(res, ops[0].getType());
}

static mlir::Value emitX86MaskLogic(CIRGenBuilderTy &builder,
                                    mlir::Location loc,
                                    cir::BinOpKind binOpKind,
                                    const SmallVector<mlir::Value> &ops,
                                    bool invertLHS = false) {
  unsigned numElts = cast<cir::IntType>(ops[0].getType()).getWidth();
  mlir::Value lhs = getMaskVecValue(builder, loc, ops[0], numElts);
  mlir::Value rhs = getMaskVecValue(builder, loc, ops[1], numElts);

  if (invertLHS)
    lhs = builder.createNot(lhs);
  return builder.createBitcast(builder.createBinop(loc, lhs, binOpKind, rhs),
                               ops[0].getType());
}

static mlir::Value emitX86MaskTest(CIRGenBuilderTy &builder, mlir::Location loc,
                                   const std::string &intrinsicName,
                                   const SmallVector<mlir::Value> &ops) {
  auto intTy = cast<cir::IntType>(ops[0].getType());
  unsigned numElts = intTy.getWidth();
  mlir::Value lhsVec = getMaskVecValue(builder, loc, ops[0], numElts);
  mlir::Value rhsVec = getMaskVecValue(builder, loc, ops[1], numElts);
  mlir::Type resTy = builder.getSInt32Ty();
  return builder.emitIntrinsicCallOp(loc, intrinsicName, resTy,
                                     mlir::ValueRange{lhsVec, rhsVec});
}

static mlir::Value emitX86MaskedCompareResult(CIRGenBuilderTy &builder,
                                              mlir::Value cmp, unsigned numElts,
                                              mlir::Value maskIn,
                                              mlir::Location loc) {
  if (maskIn) {
    auto c = mlir::dyn_cast_or_null<cir::ConstantOp>(maskIn.getDefiningOp());
    if (!c || !c.isAllOnesValue())
      cmp = builder.createAnd(loc, cmp,
                              getMaskVecValue(builder, loc, maskIn, numElts));
  }
  if (numElts < 8) {
    llvm::SmallVector<mlir::Attribute, 8> indices;
    mlir::Type i64Ty = builder.getSInt64Ty();

    for (unsigned i = 0; i != numElts; ++i)
      indices.push_back(cir::IntAttr::get(i64Ty, i));
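    // Pad out to 8 elements with indices that select from the second (zero)
    // shuffle operand: for numElts = 4 this appends {4, 5, 6, 7}, so the top
    // lanes come from nullVec below.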
    for (unsigned i = numElts; i != 8; ++i)
      indices.push_back(cir::IntAttr::get(i64Ty, i % numElts + numElts));

    // This shuffles between cmp (first vector) and null (second vector).
    mlir::Value nullVec = builder.getNullValue(cmp.getType(), loc);
    cmp = builder.createVecShuffle(loc, cmp, nullVec, indices);
  }
  return builder.createBitcast(cmp, builder.getUIntNTy(std::max(numElts, 8U)));
}

// TODO: The cgf parameter should be removed when all the NYI cases are
// implemented.
static std::optional<mlir::Value>
emitX86MaskedCompare(CIRGenFunction &cgf, CIRGenBuilderTy &builder, unsigned cc,
                     bool isSigned, ArrayRef<mlir::Value> ops,
                     mlir::Location loc) {
  assert((ops.size() == 2 || ops.size() == 4) &&
         "Unexpected number of arguments");
  unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
  mlir::Value cmp;

  if (cc == 3) {
    cgf.cgm.errorNYI(loc, "emitX86MaskedCompare: cc == 3");
    return {};
  } else if (cc == 7) {
    cgf.cgm.errorNYI(loc, "emitX86MaskedCompare: cc == 7");
    return {};
  } else {
    cir::CmpOpKind pred;
    switch (cc) {
    default:
      llvm_unreachable("Unknown condition code");
    case 0:
      pred = cir::CmpOpKind::eq;
      break;
    case 1:
      pred = cir::CmpOpKind::lt;
      break;
    case 2:
      pred = cir::CmpOpKind::le;
      break;
    case 4:
      pred = cir::CmpOpKind::ne;
      break;
    case 5:
      pred = cir::CmpOpKind::ge;
      break;
    case 6:
      pred = cir::CmpOpKind::gt;
      break;
    }

    auto resultTy = cir::VectorType::get(builder.getSIntNTy(1), numElts);
    cmp = cir::VecCmpOp::create(builder, loc, resultTy, pred, ops[0], ops[1]);
  }

  mlir::Value maskIn;
  if (ops.size() == 4)
    maskIn = ops[3];

  return emitX86MaskedCompareResult(builder, cmp, numElts, maskIn, loc);
}

// TODO: The cgf parameter should be removed when all the NYI cases are
// implemented.
static std::optional<mlir::Value> emitX86ConvertToMask(CIRGenFunction &cgf,
                                                       CIRGenBuilderTy &builder,
                                                       mlir::Value in,
                                                       mlir::Location loc) {
  cir::ConstantOp zero = builder.getNullValue(in.getType(), loc);
  return emitX86MaskedCompare(cgf, builder, 1, true, {in, zero}, loc);
}

static std::optional<mlir::Value> emitX86SExtMask(CIRGenBuilderTy &builder,
                                                  mlir::Value op,
                                                  mlir::Type dstTy,
                                                  mlir::Location loc) {
  unsigned numberOfElements = cast<cir::VectorType>(dstTy).getSize();
  mlir::Value mask = getMaskVecValue(builder, loc, op, numberOfElements);

  return builder.createCast(loc, cir::CastKind::integral, mask, dstTy);
}

static mlir::Value emitVecInsert(CIRGenBuilderTy &builder, mlir::Location loc,
                                 mlir::Value vec, mlir::Value value,
                                 mlir::Value indexOp) {
  unsigned numElts = cast<cir::VectorType>(vec.getType()).getSize();

  uint64_t index =
      indexOp.getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue();

  index &= numElts - 1;

  cir::ConstantOp indexVal = builder.getUInt64(index, loc);

  return cir::VecInsertOp::create(builder, loc, vec, value, indexVal);
}

static mlir::Value emitX86FunnelShift(CIRGenBuilderTy &builder,
                                      mlir::Location location, mlir::Value &op0,
                                      mlir::Value &op1, mlir::Value &amt,
                                      bool isRight) {
  mlir::Type op0Ty = op0.getType();

  // The amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo, and the types are all
  // power-of-2, so we only care about the lowest log2 bits anyway.
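  // (LLVM's fshl/fshr concatenate the two operands as a double-width value,
  // shift by the amount modulo the element width, and return the high or low
  // half respectively; fshl(a, a, s) is a rotate left.)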
  if (amt.getType() != op0Ty) {
    auto vecTy = mlir::cast<cir::VectorType>(op0Ty);
    uint64_t numElems = vecTy.getSize();

    auto amtTy = mlir::cast<cir::IntType>(amt.getType());
    auto vecElemTy = mlir::cast<cir::IntType>(vecTy.getElementType());

    // If signed, cast to the same width but unsigned first to
    // ensure zero-extension when casting to a bigger unsigned `vecElemTy`.
    if (amtTy.isSigned()) {
      cir::IntType unsignedAmtTy = builder.getUIntNTy(amtTy.getWidth());
      amt = builder.createIntCast(amt, unsignedAmtTy);
    }
    cir::IntType unsignedVecElemType = builder.getUIntNTy(vecElemTy.getWidth());
    amt = builder.createIntCast(amt, unsignedVecElemType);
    amt = cir::VecSplatOp::create(
        builder, location, cir::VectorType::get(unsignedVecElemType, numElems),
        amt);
  }

  const StringRef intrinsicName = isRight ? "fshr" : "fshl";
  return builder.emitIntrinsicCallOp(location, intrinsicName, op0Ty,
                                     mlir::ValueRange{op0, op1, amt});
}

static mlir::Value emitX86Muldq(CIRGenBuilderTy &builder, mlir::Location loc,
                                bool isSigned,
                                const SmallVector<mlir::Value> &ops,
                                unsigned opTypePrimitiveSizeInBits) {
  mlir::Type ty = cir::VectorType::get(builder.getSInt64Ty(),
                                       opTypePrimitiveSizeInBits / 64);
  mlir::Value lhs = builder.createBitcast(loc, ops[0], ty);
  mlir::Value rhs = builder.createBitcast(loc, ops[1], ty);
  if (isSigned) {
    cir::ConstantOp shiftAmt =
        builder.getConstant(loc, cir::IntAttr::get(builder.getSInt64Ty(), 32));
    cir::VecSplatOp shiftSplatVecOp =
        cir::VecSplatOp::create(builder, loc, ty, shiftAmt.getResult());
    mlir::Value shiftSplatValue = shiftSplatVecOp.getResult();
    // In CIR, right-shift operations are automatically lowered to either an
    // arithmetic or logical shift depending on the operand type. The purpose
    // of the shifts here is to propagate the sign bit of the 32-bit input
    // into the upper bits of each vector lane.
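    // E.g. a lane holding 0x0000000080000001 becomes 0x8000000100000000 after
    // the left shift and 0xffffffff80000001 after the arithmetic right shift,
    // i.e. the low 32 bits sign-extended to 64 bits.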
    lhs = builder.createShift(loc, lhs, shiftSplatValue, true);
    lhs = builder.createShift(loc, lhs, shiftSplatValue, false);
    rhs = builder.createShift(loc, rhs, shiftSplatValue, true);
    rhs = builder.createShift(loc, rhs, shiftSplatValue, false);
  } else {
    cir::ConstantOp maskScalar = builder.getConstant(
        loc, cir::IntAttr::get(builder.getSInt64Ty(), 0xffffffff));
    cir::VecSplatOp mask =
        cir::VecSplatOp::create(builder, loc, ty, maskScalar.getResult());
    // Clear the upper bits.
    lhs = builder.createAnd(loc, lhs, mask);
    rhs = builder.createAnd(loc, rhs, mask);
  }
  return builder.createMul(loc, lhs, rhs);
}

// Convert f16 half values to floats.
static mlir::Value emitX86CvtF16ToFloatExpr(CIRGenBuilderTy &builder,
                                            mlir::Location loc,
                                            const SmallVector<mlir::Value> &ops,
                                            mlir::Type dstTy) {
  assert((ops.size() == 1 || ops.size() == 3 || ops.size() == 4) &&
         "Unknown cvtph2ps intrinsic");

  // If the SAE intrinsic doesn't use default rounding, then we can't upgrade.
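  // (An immediate of 4 is _MM_FROUND_CUR_DIRECTION, i.e. default rounding.)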
  if (ops.size() == 4) {
    auto constOp = ops[3].getDefiningOp<cir::ConstantOp>();
    assert(constOp && "Expected constant operand");
    if (constOp.getIntValue().getZExtValue() != 4) {
      return builder.emitIntrinsicCallOp(loc, "x86.avx512.mask.vcvtph2ps.512",
                                         dstTy, ops);
    }
  }

  unsigned numElts = cast<cir::VectorType>(dstTy).getSize();
  mlir::Value src = ops[0];

  // Extract the subvector.
  if (numElts != cast<cir::VectorType>(src.getType()).getSize()) {
    assert(numElts == 4 && "Unexpected vector size");
    src = builder.createVecShuffle(loc, src, {0, 1, 2, 3});
  }

  // Bitcast from vXi16 to vXf16.
  cir::VectorType halfTy =
      cir::VectorType::get(cir::FP16Type::get(builder.getContext()), numElts);

  src = builder.createCast(cir::CastKind::bitcast, src, halfTy);

  // Perform the fp-extension.
  mlir::Value res = builder.createCast(cir::CastKind::floating, src, dstTy);

  if (ops.size() >= 3)
    res = emitX86Select(builder, loc, ops[2], res, ops[1]);
  return res;
}

static mlir::Value emitX86vpcom(CIRGenBuilderTy &builder, mlir::Location loc,
                                const SmallVector<mlir::Value> &ops,
                                bool isSigned) {
  mlir::Value op0 = ops[0];
  mlir::Value op1 = ops[1];

  cir::VectorType ty = cast<cir::VectorType>(op0.getType());
  cir::IntType elementTy = cast<cir::IntType>(ty.getElementType());

  uint64_t imm = CIRGenFunction::getZExtIntValueFromConstOp(ops[2]) & 0x7;

  cir::CmpOpKind pred;
  switch (imm) {
  case 0x0:
    pred = cir::CmpOpKind::lt;
    break;
  case 0x1:
    pred = cir::CmpOpKind::le;
    break;
  case 0x2:
    pred = cir::CmpOpKind::gt;
    break;
  case 0x3:
    pred = cir::CmpOpKind::ge;
    break;
  case 0x4:
    pred = cir::CmpOpKind::eq;
    break;
  case 0x5:
    pred = cir::CmpOpKind::ne;
    break;
  case 0x6:
    return builder.getNullValue(ty, loc); // FALSE
  case 0x7: {
    llvm::APInt allOnes = llvm::APInt::getAllOnes(elementTy.getWidth());
    return cir::VecSplatOp::create(
        builder, loc, ty,
        builder.getConstAPInt(loc, elementTy, allOnes)); // TRUE
  }
  default:
    llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
  }

  if ((!isSigned && elementTy.isSigned()) ||
      (isSigned && elementTy.isUnsigned())) {
    elementTy = elementTy.isSigned() ? builder.getUIntNTy(elementTy.getWidth())
                                     : builder.getSIntNTy(elementTy.getWidth());
    ty = cir::VectorType::get(elementTy, ty.getSize());
    op0 = builder.createBitcast(op0, ty);
    op1 = builder.createBitcast(op1, ty);
  }

  return builder.createVecCompare(loc, pred, op0, op1);
}

static mlir::Value emitX86Fpclass(CIRGenBuilderTy &builder, mlir::Location loc,
                                  unsigned builtinID,
                                  SmallVector<mlir::Value> &ops) {
  unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
  mlir::Value maskIn = ops[2];
  ops.erase(ops.begin() + 2);

  StringRef intrinsicName;
  switch (builtinID) {
  default:
    llvm_unreachable("Unsupported fpclass builtin");
  case X86::BI__builtin_ia32_vfpclassbf16128_mask:
    intrinsicName = "x86.avx10.fpclass.bf16.128";
    break;
  case X86::BI__builtin_ia32_vfpclassbf16256_mask:
    intrinsicName = "x86.avx10.fpclass.bf16.256";
    break;
  case X86::BI__builtin_ia32_vfpclassbf16512_mask:
    intrinsicName = "x86.avx10.fpclass.bf16.512";
    break;
  case X86::BI__builtin_ia32_fpclassph128_mask:
    intrinsicName = "x86.avx512fp16.fpclass.ph.128";
    break;
  case X86::BI__builtin_ia32_fpclassph256_mask:
    intrinsicName = "x86.avx512fp16.fpclass.ph.256";
    break;
  case X86::BI__builtin_ia32_fpclassph512_mask:
    intrinsicName = "x86.avx512fp16.fpclass.ph.512";
    break;
  case X86::BI__builtin_ia32_fpclassps128_mask:
    intrinsicName = "x86.avx512.fpclass.ps.128";
    break;
  case X86::BI__builtin_ia32_fpclassps256_mask:
    intrinsicName = "x86.avx512.fpclass.ps.256";
    break;
  case X86::BI__builtin_ia32_fpclassps512_mask:
    intrinsicName = "x86.avx512.fpclass.ps.512";
    break;
  case X86::BI__builtin_ia32_fpclasspd128_mask:
    intrinsicName = "x86.avx512.fpclass.pd.128";
    break;
  case X86::BI__builtin_ia32_fpclasspd256_mask:
    intrinsicName = "x86.avx512.fpclass.pd.256";
    break;
  case X86::BI__builtin_ia32_fpclasspd512_mask:
    intrinsicName = "x86.avx512.fpclass.pd.512";
    break;
  }

  auto cmpResultTy = cir::VectorType::get(builder.getSIntNTy(1), numElts);
  mlir::Value fpclass =
      builder.emitIntrinsicCallOp(loc, intrinsicName, cmpResultTy, ops);
  return emitX86MaskedCompareResult(builder, fpclass, numElts, maskIn, loc);
}

static mlir::Value emitX86Aes(CIRGenBuilderTy &builder, mlir::Location loc,
                              llvm::StringRef intrinsicName, mlir::Type retType,
                              const SmallVector<mlir::Value> &ops) {
  // Create the return record type and call the intrinsic function.
  mlir::Type vecType =
      mlir::cast<cir::PointerType>(ops[0].getType()).getPointee();
  cir::RecordType rstRecTy = builder.getAnonRecordTy({retType, vecType});
  mlir::Value rstValueRec = builder.emitIntrinsicCallOp(
      loc, intrinsicName, rstRecTy, mlir::ValueRange{ops[1], ops[2]});

  // Extract the first return value and truncate it to 1 bit, then cast the
  // result to a bool value.
  mlir::Value flag =
      cir::ExtractMemberOp::create(builder, loc, rstValueRec, /*index=*/0);
  mlir::Value flagBit0 = builder.createCast(loc, cir::CastKind::integral, flag,
                                            builder.getUIntNTy(1));
  mlir::Value succ = builder.createCast(loc, cir::CastKind::int_to_bool,
                                        flagBit0, builder.getBoolTy());

  // Extract the second return value and store it to the output address on
  // success; store zero otherwise.
  mlir::Value out =
      cir::ExtractMemberOp::create(builder, loc, rstValueRec, /*index=*/1);
  Address outAddr(ops[0], /*align=*/CharUnits::fromQuantity(16));
  cir::IfOp::create(
      builder, loc, succ, /*withElseRegion=*/true,
      /*thenBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location) {
        builder.createStore(loc, out, outAddr);
        builder.createYield(loc);
      },
      /*elseBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location) {
        mlir::Value zero = builder.getNullValue(vecType, loc);
        builder.createStore(loc, zero, outAddr);
        builder.createYield(loc);
      });

  return cir::ExtractMemberOp::create(builder, loc, rstValueRec, /*index=*/0);
}

static mlir::Value emitX86Aeswide(CIRGenBuilderTy &builder, mlir::Location loc,
                                  llvm::StringRef intrinsicName,
                                  mlir::Type retType,
                                  const SmallVector<mlir::Value> &ops) {
  mlir::Type vecType =
      mlir::cast<cir::PointerType>(ops[1].getType()).getPointee();

  // Create the record for the return type and load the input arguments, then
  // call the intrinsic function.
  mlir::Type recTypes[9] = {retType, vecType, vecType, vecType, vecType,
                            vecType, vecType, vecType, vecType};
  mlir::Value arguments[9];
  arguments[0] = ops[2];
  for (int i = 0; i < 8; i++) {
    // Load each vector argument from the input address.
    cir::ConstantOp idx = builder.getUInt32(i, loc);
    mlir::Value nextInElePtr =
        builder.getArrayElement(loc, loc, ops[1], vecType, idx,
                                /*shouldDecay=*/false);
    arguments[i + 1] =
        builder.createAlignedLoad(loc, vecType, nextInElePtr,
                                  /*align=*/CharUnits::fromQuantity(16));
  }
  cir::RecordType rstRecTy = builder.getAnonRecordTy(recTypes);
  mlir::Value rstValueRec =
      builder.emitIntrinsicCallOp(loc, intrinsicName, rstRecTy, arguments);

  // Extract the first return value and truncate it to 1 bit, then cast the
  // result to a bool value.
  mlir::Value flag =
      cir::ExtractMemberOp::create(builder, loc, rstValueRec, /*index=*/0);
  mlir::Value flagBit0 = builder.createCast(loc, cir::CastKind::integral, flag,
                                            builder.getUIntNTy(1));
  mlir::Value succ = builder.createCast(loc, cir::CastKind::int_to_bool,
                                        flagBit0, builder.getBoolTy());

  // Extract the other return values and store them to the output address on
  // success; store zeros otherwise.
  cir::IfOp::create(
      builder, loc, succ, /*withElseRegion=*/true,
      /*thenBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location) {
        for (int i = 0; i < 8; i++) {
          mlir::Value out =
              cir::ExtractMemberOp::create(builder, loc, rstValueRec,
                                           /*index=*/i + 1);
          cir::ConstantOp idx = builder.getUInt32(i, loc);
          mlir::Value nextOutEleAddr =
              builder.getArrayElement(loc, loc, ops[0], vecType, idx,
                                      /*shouldDecay=*/false);
          Address outAddr(nextOutEleAddr,
                          /*align=*/CharUnits::fromQuantity(16));
          builder.createStore(loc, out, outAddr);
        }
        builder.createYield(loc);
      },
      /*elseBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location) {
        mlir::Value zero = builder.getNullValue(vecType, loc);
        for (int i = 0; i < 8; i++) {
          cir::ConstantOp idx = builder.getUInt32(i, loc);
          mlir::Value nextOutEleAddr =
              builder.getArrayElement(loc, loc, ops[0], vecType, idx,
                                      /*shouldDecay=*/false);
          Address outAddr(nextOutEleAddr,
                          /*align=*/CharUnits::fromQuantity(16));
          builder.createStore(loc, zero, outAddr);
        }
        builder.createYield(loc);
      });

  return cir::ExtractMemberOp::create(builder, loc, rstValueRec, /*index=*/0);
}

std::optional<mlir::Value>
CIRGenFunction::emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr) {
  if (builtinID == Builtin::BI__builtin_cpu_is) {
    cgm.errorNYI(expr->getSourceRange(), "__builtin_cpu_is");
    return mlir::Value{};
  }
  if (builtinID == Builtin::BI__builtin_cpu_supports) {
    cgm.errorNYI(expr->getSourceRange(), "__builtin_cpu_supports");
    return mlir::Value{};
  }
  if (builtinID == Builtin::BI__builtin_cpu_init) {
    cgm.errorNYI(expr->getSourceRange(), "__builtin_cpu_init");
    return mlir::Value{};
  }

  // Handle MSVC intrinsics before argument evaluation to prevent double
  // evaluation.

  // Find out if any arguments are required to be integer constant
  // expressions.

  // The operands of the builtin call.
  llvm::SmallVector<mlir::Value> ops;

  // `iceArguments` is a bitmap indicating whether the i-th argument is
  // required to be a constant integer expression.
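  // For example, if bit 1 of iceArguments is set, the second argument must be
  // an integer constant expression, and emitScalarOrConstFoldImmArg below
  // const-folds it instead of emitting a normal scalar expression.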
  unsigned iceArguments = 0;
  ASTContext::GetBuiltinTypeError error;
  getContext().GetBuiltinType(builtinID, error, &iceArguments);
  assert(error == ASTContext::GE_None && "Error while getting builtin type.");

  for (auto [idx, arg] : llvm::enumerate(expr->arguments()))
    ops.push_back(emitScalarOrConstFoldImmArg(iceArguments, idx, arg));

  CIRGenBuilderTy &builder = getBuilder();
  mlir::Type voidTy = builder.getVoidTy();

  switch (builtinID) {
  default:
    return std::nullopt;
  case X86::BI_mm_clflush:
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
                                       "x86.sse2.clflush", voidTy, ops[0]);
  case X86::BI_mm_lfence:
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
                                       "x86.sse2.lfence", voidTy);
  case X86::BI_mm_pause:
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
                                       "x86.sse2.pause", voidTy);
  case X86::BI_mm_mfence:
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
                                       "x86.sse2.mfence", voidTy);
  case X86::BI_mm_sfence:
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
                                       "x86.sse.sfence", voidTy);
  case X86::BI_mm_prefetch:
  case X86::BI_m_prefetch:
  case X86::BI_m_prefetchw:
    return emitPrefetch(*this, builtinID, expr, ops);
  case X86::BI__rdtsc:
  case X86::BI__builtin_ia32_rdtscp: {
    cgm.errorNYI(expr->getSourceRange(),
                 std::string("unimplemented X86 builtin call: ") +
                     getContext().BuiltinInfo.getName(builtinID));
    return mlir::Value{};
  }
  case X86::BI__builtin_ia32_lzcnt_u16:
  case X86::BI__builtin_ia32_lzcnt_u32:
  case X86::BI__builtin_ia32_lzcnt_u64: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    mlir::Value isZeroPoison = builder.getFalse(loc);
    return builder.emitIntrinsicCallOp(loc, "ctlz", ops[0].getType(),
                                       mlir::ValueRange{ops[0], isZeroPoison});
  }
  case X86::BI__builtin_ia32_tzcnt_u16:
  case X86::BI__builtin_ia32_tzcnt_u32:
  case X86::BI__builtin_ia32_tzcnt_u64: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    mlir::Value isZeroPoison = builder.getFalse(loc);
    return builder.emitIntrinsicCallOp(loc, "cttz", ops[0].getType(),
                                       mlir::ValueRange{ops[0], isZeroPoison});
  }
  case X86::BI__builtin_ia32_undef128:
  case X86::BI__builtin_ia32_undef256:
  case X86::BI__builtin_ia32_undef512:
    // The x86 definition of "undef" is not the same as the LLVM definition
    // (PR32176). We leave optimizing away an unnecessary zero constant to the
    // IR optimizer and backend.
    // TODO: If we had a "freeze" IR instruction to generate a fixed undef
    // value, we should use that here instead of a zero.
    return builder.getNullValue(convertType(expr->getType()),
                                getLoc(expr->getExprLoc()));
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vec_ext_v32qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
  case X86::BI__builtin_ia32_vec_ext_v4di: {
    unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();

    uint64_t index = getZExtIntValueFromConstOp(ops[1]);
    index &= numElts - 1;

    cir::ConstantOp indexVal =
        builder.getUInt64(index, getLoc(expr->getExprLoc()));

    // These builtins exist so we can ensure the index is an ICE and in range.
    // Otherwise we could just do this in the header file.
    return cir::VecExtractOp::create(builder, getLoc(expr->getExprLoc()),
                                     ops[0], indexVal);
  }
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vec_set_v32qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
  case X86::BI__builtin_ia32_vec_set_v4di: {
    return emitVecInsert(builder, getLoc(expr->getExprLoc()), ops[0], ops[1],
                         ops[2]);
  }
  case X86::BI__builtin_ia32_kunpckhi:
    return emitX86MaskUnpack(builder, getLoc(expr->getExprLoc()),
                             "x86.avx512.kunpackb", ops);
  case X86::BI__builtin_ia32_kunpcksi:
    return emitX86MaskUnpack(builder, getLoc(expr->getExprLoc()),
                             "x86.avx512.kunpackw", ops);
  case X86::BI__builtin_ia32_kunpckdi:
    return emitX86MaskUnpack(builder, getLoc(expr->getExprLoc()),
                             "x86.avx512.kunpackd", ops);
  case X86::BI_mm_setcsr:
  case X86::BI__builtin_ia32_ldmxcsr: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    Address tmp = createMemTemp(expr->getArg(0)->getType(), loc);
    builder.createStore(loc, ops[0], tmp);
    return builder.emitIntrinsicCallOp(loc, "x86.sse.ldmxcsr",
                                       builder.getVoidTy(), tmp.getPointer());
  }
  case X86::BI_mm_getcsr:
  case X86::BI__builtin_ia32_stmxcsr: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    Address tmp = createMemTemp(expr->getType(), loc);
    builder.emitIntrinsicCallOp(loc, "x86.sse.stmxcsr", builder.getVoidTy(),
                                tmp.getPointer());
    return builder.createLoad(loc, tmp);
  }
  case X86::BI__builtin_ia32_xsave:
  case X86::BI__builtin_ia32_xsave64:
  case X86::BI__builtin_ia32_xrstor:
  case X86::BI__builtin_ia32_xrstor64:
  case X86::BI__builtin_ia32_xsaveopt:
  case X86::BI__builtin_ia32_xsaveopt64:
  case X86::BI__builtin_ia32_xrstors:
  case X86::BI__builtin_ia32_xrstors64:
  case X86::BI__builtin_ia32_xsavec:
  case X86::BI__builtin_ia32_xsavec64:
  case X86::BI__builtin_ia32_xsaves:
  case X86::BI__builtin_ia32_xsaves64:
  case X86::BI__builtin_ia32_xsetbv:
  case X86::BI_xsetbv: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    StringRef intrinsicName;
    switch (builtinID) {
    default:
      llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_xsave:
      intrinsicName = "x86.xsave";
      break;
    case X86::BI__builtin_ia32_xsave64:
      intrinsicName = "x86.xsave64";
      break;
    case X86::BI__builtin_ia32_xrstor:
      intrinsicName = "x86.xrstor";
      break;
    case X86::BI__builtin_ia32_xrstor64:
      intrinsicName = "x86.xrstor64";
      break;
    case X86::BI__builtin_ia32_xsaveopt:
      intrinsicName = "x86.xsaveopt";
      break;
    case X86::BI__builtin_ia32_xsaveopt64:
      intrinsicName = "x86.xsaveopt64";
      break;
    case X86::BI__builtin_ia32_xrstors:
      intrinsicName = "x86.xrstors";
      break;
    case X86::BI__builtin_ia32_xrstors64:
      intrinsicName = "x86.xrstors64";
      break;
    case X86::BI__builtin_ia32_xsavec:
      intrinsicName = "x86.xsavec";
      break;
    case X86::BI__builtin_ia32_xsavec64:
      intrinsicName = "x86.xsavec64";
      break;
    case X86::BI__builtin_ia32_xsaves:
      intrinsicName = "x86.xsaves";
      break;
    case X86::BI__builtin_ia32_xsaves64:
      intrinsicName = "x86.xsaves64";
      break;
    case X86::BI__builtin_ia32_xsetbv:
    case X86::BI_xsetbv:
      intrinsicName = "x86.xsetbv";
      break;
    }

    // The xsave family of instructions takes a 64-bit mask that specifies
    // which processor state components to save/restore. The hardware expects
    // this mask split into two 32-bit registers: EDX (high 32 bits) and
    // EAX (low 32 bits).
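    // E.g. a mask of 0x0000000500000003 is passed as mhi (EDX) = 0x00000005
    // and mlo (EAX) = 0x00000003.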
    mlir::Type i32Ty = builder.getSInt32Ty();

    // mhi = (uint32_t)(ops[1] >> 32) - extract the high 32 bits via shift.
    cir::ConstantOp shift32 = builder.getSInt64(32, loc);
    mlir::Value mhi = builder.createShift(loc, ops[1], shift32.getResult(),
                                          /*isShiftLeft=*/false);
    mhi = builder.createIntCast(mhi, i32Ty);

    // mlo = (uint32_t)ops[1] - extract the low 32 bits by truncation.
    mlir::Value mlo = builder.createIntCast(ops[1], i32Ty);

    return builder.emitIntrinsicCallOp(loc, intrinsicName, voidTy,
                                       mlir::ValueRange{ops[0], mhi, mlo});
  }
  case X86::BI__builtin_ia32_xgetbv:
  case X86::BI_xgetbv:
    // xgetbv reads the extended control register specified by ops[0] (ECX)
    // and returns the 64-bit value.
    return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()), "x86.xgetbv",
                                       builder.getUInt64Ty(), ops[0]);
  case X86::BI__builtin_ia32_storedqudi128_mask:
  case X86::BI__builtin_ia32_storedqusi128_mask:
  case X86::BI__builtin_ia32_storedquhi128_mask:
  case X86::BI__builtin_ia32_storedquqi128_mask:
  case X86::BI__builtin_ia32_storeupd128_mask:
  case X86::BI__builtin_ia32_storeups128_mask:
  case X86::BI__builtin_ia32_storedqudi256_mask:
  case X86::BI__builtin_ia32_storedqusi256_mask:
  case X86::BI__builtin_ia32_storedquhi256_mask:
  case X86::BI__builtin_ia32_storedquqi256_mask:
  case X86::BI__builtin_ia32_storeupd256_mask:
  case X86::BI__builtin_ia32_storeups256_mask:
  case X86::BI__builtin_ia32_storedqudi512_mask:
  case X86::BI__builtin_ia32_storedqusi512_mask:
  case X86::BI__builtin_ia32_storedquhi512_mask:
  case X86::BI__builtin_ia32_storedquqi512_mask:
  case X86::BI__builtin_ia32_storeupd512_mask:
  case X86::BI__builtin_ia32_storeups512_mask:
  case X86::BI__builtin_ia32_storesbf16128_mask:
  case X86::BI__builtin_ia32_storesh128_mask:
  case X86::BI__builtin_ia32_storess128_mask:
  case X86::BI__builtin_ia32_storesd128_mask:
    cgm.errorNYI(expr->getSourceRange(),
                 std::string("unimplemented x86 builtin call: ") +
                     getContext().BuiltinInfo.getName(builtinID));
    return mlir::Value{};
  case X86::BI__builtin_ia32_cvtmask2b128:
  case X86::BI__builtin_ia32_cvtmask2b256:
  case X86::BI__builtin_ia32_cvtmask2b512:
  case X86::BI__builtin_ia32_cvtmask2w128:
  case X86::BI__builtin_ia32_cvtmask2w256:
  case X86::BI__builtin_ia32_cvtmask2w512:
  case X86::BI__builtin_ia32_cvtmask2d128:
  case X86::BI__builtin_ia32_cvtmask2d256:
  case X86::BI__builtin_ia32_cvtmask2d512:
  case X86::BI__builtin_ia32_cvtmask2q128:
  case X86::BI__builtin_ia32_cvtmask2q256:
  case X86::BI__builtin_ia32_cvtmask2q512:
    return emitX86SExtMask(this->getBuilder(), ops[0],
                           convertType(expr->getType()),
                           getLoc(expr->getExprLoc()));
  case X86::BI__builtin_ia32_cvtb2mask128:
  case X86::BI__builtin_ia32_cvtb2mask256:
  case X86::BI__builtin_ia32_cvtb2mask512:
  case X86::BI__builtin_ia32_cvtw2mask128:
  case X86::BI__builtin_ia32_cvtw2mask256:
  case X86::BI__builtin_ia32_cvtw2mask512:
  case X86::BI__builtin_ia32_cvtd2mask128:
  case X86::BI__builtin_ia32_cvtd2mask256:
  case X86::BI__builtin_ia32_cvtd2mask512:
  case X86::BI__builtin_ia32_cvtq2mask128:
  case X86::BI__builtin_ia32_cvtq2mask256:
  case X86::BI__builtin_ia32_cvtq2mask512:
    return emitX86ConvertToMask(*this, this->getBuilder(), ops[0],
                                getLoc(expr->getExprLoc()));
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmsubsh3_mask3:
  case X86::BI__builtin_ia32_vfmsubss3_mask3:
  case X86::BI__builtin_ia32_vfmsubsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddph512_mask:
  case X86::BI__builtin_ia32_vfmaddph512_maskz:
  case X86::BI__builtin_ia32_vfmaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_movdqa32store128_mask:
  case X86::BI__builtin_ia32_movdqa64store128_mask:
  case X86::BI__builtin_ia32_storeaps128_mask:
  case X86::BI__builtin_ia32_storeapd128_mask:
  case X86::BI__builtin_ia32_movdqa32store256_mask:
  case X86::BI__builtin_ia32_movdqa64store256_mask:
  case X86::BI__builtin_ia32_storeaps256_mask:
  case X86::BI__builtin_ia32_storeapd256_mask:
  case X86::BI__builtin_ia32_movdqa32store512_mask:
  case X86::BI__builtin_ia32_movdqa64store512_mask:
  case X86::BI__builtin_ia32_storeaps512_mask:
  case X86::BI__builtin_ia32_storeapd512_mask:
  case X86::BI__builtin_ia32_loadups128_mask:
  case X86::BI__builtin_ia32_loadups256_mask:
  case X86::BI__builtin_ia32_loadups512_mask:
  case X86::BI__builtin_ia32_loadupd128_mask:
  case X86::BI__builtin_ia32_loadupd256_mask:
  case X86::BI__builtin_ia32_loadupd512_mask:
  case X86::BI__builtin_ia32_loaddquqi128_mask:
  case X86::BI__builtin_ia32_loaddquqi256_mask:
  case X86::BI__builtin_ia32_loaddquqi512_mask:
  case X86::BI__builtin_ia32_loaddquhi128_mask:
  case X86::BI__builtin_ia32_loaddquhi256_mask:
  case X86::BI__builtin_ia32_loaddquhi512_mask:
  case X86::BI__builtin_ia32_loaddqusi128_mask:
  case X86::BI__builtin_ia32_loaddqusi256_mask:
  case X86::BI__builtin_ia32_loaddqusi512_mask:
  case X86::BI__builtin_ia32_loaddqudi128_mask:
  case X86::BI__builtin_ia32_loaddqudi256_mask:
  case X86::BI__builtin_ia32_loaddqudi512_mask:
  case X86::BI__builtin_ia32_loadsbf16128_mask:
  case X86::BI__builtin_ia32_loadsh128_mask:
  case X86::BI__builtin_ia32_loadss128_mask:
  case X86::BI__builtin_ia32_loadsd128_mask:
  case X86::BI__builtin_ia32_loadaps128_mask:
  case X86::BI__builtin_ia32_loadaps256_mask:
  case X86::BI__builtin_ia32_loadaps512_mask:
  case X86::BI__builtin_ia32_loadapd128_mask:
  case X86::BI__builtin_ia32_loadapd256_mask:
  case X86::BI__builtin_ia32_loadapd512_mask:
  case X86::BI__builtin_ia32_movdqa32load128_mask:
  case X86::BI__builtin_ia32_movdqa32load256_mask:
  case X86::BI__builtin_ia32_movdqa32load512_mask:
  case X86::BI__builtin_ia32_movdqa64load128_mask:
  case X86::BI__builtin_ia32_movdqa64load256_mask:
  case X86::BI__builtin_ia32_movdqa64load512_mask:
  case X86::BI__builtin_ia32_expandloaddf128_mask:
  case X86::BI__builtin_ia32_expandloaddf256_mask:
  case X86::BI__builtin_ia32_expandloaddf512_mask:
  case X86::BI__builtin_ia32_expandloadsf128_mask:
  case X86::BI__builtin_ia32_expandloadsf256_mask:
  case X86::BI__builtin_ia32_expandloadsf512_mask:
  case X86::BI__builtin_ia32_expandloaddi128_mask:
  case X86::BI__builtin_ia32_expandloaddi256_mask:
  case X86::BI__builtin_ia32_expandloaddi512_mask:
  case X86::BI__builtin_ia32_expandloadsi128_mask:
  case X86::BI__builtin_ia32_expandloadsi256_mask:
  case X86::BI__builtin_ia32_expandloadsi512_mask:
  case X86::BI__builtin_ia32_expandloadhi128_mask:
  case X86::BI__builtin_ia32_expandloadhi256_mask:
  case X86::BI__builtin_ia32_expandloadhi512_mask:
  case X86::BI__builtin_ia32_expandloadqi128_mask:
  case X86::BI__builtin_ia32_expandloadqi256_mask:
  case X86::BI__builtin_ia32_expandloadqi512_mask:
  case X86::BI__builtin_ia32_compressstoredf128_mask:
  case X86::BI__builtin_ia32_compressstoredf256_mask:
  case X86::BI__builtin_ia32_compressstoredf512_mask:
  case X86::BI__builtin_ia32_compressstoresf128_mask:
  case X86::BI__builtin_ia32_compressstoresf256_mask:
  case X86::BI__builtin_ia32_compressstoresf512_mask:
  case X86::BI__builtin_ia32_compressstoredi128_mask:
  case X86::BI__builtin_ia32_compressstoredi256_mask:
  case X86::BI__builtin_ia32_compressstoredi512_mask:
  case X86::BI__builtin_ia32_compressstoresi128_mask:
  case X86::BI__builtin_ia32_compressstoresi256_mask:
  case X86::BI__builtin_ia32_compressstoresi512_mask:
  case X86::BI__builtin_ia32_compressstorehi128_mask:
  case X86::BI__builtin_ia32_compressstorehi256_mask:
  case X86::BI__builtin_ia32_compressstorehi512_mask:
  case X86::BI__builtin_ia32_compressstoreqi128_mask:
  case X86::BI__builtin_ia32_compressstoreqi256_mask:
  case X86::BI__builtin_ia32_compressstoreqi512_mask:
    cgm.errorNYI(expr->getSourceRange(),
                 std::string("unimplemented X86 builtin call: ") +
                     getContext().BuiltinInfo.getName(builtinID));
    return mlir::Value{};
  case X86::BI__builtin_ia32_expanddf128_mask:
  case X86::BI__builtin_ia32_expanddf256_mask:
  case X86::BI__builtin_ia32_expanddf512_mask:
  case X86::BI__builtin_ia32_expandsf128_mask:
  case X86::BI__builtin_ia32_expandsf256_mask:
  case X86::BI__builtin_ia32_expandsf512_mask:
  case X86::BI__builtin_ia32_expanddi128_mask:
  case X86::BI__builtin_ia32_expanddi256_mask:
  case X86::BI__builtin_ia32_expanddi512_mask:
  case X86::BI__builtin_ia32_expandsi128_mask:
  case X86::BI__builtin_ia32_expandsi256_mask:
  case X86::BI__builtin_ia32_expandsi512_mask:
  case X86::BI__builtin_ia32_expandhi128_mask:
  case X86::BI__builtin_ia32_expandhi256_mask:
  case X86::BI__builtin_ia32_expandhi512_mask:
  case X86::BI__builtin_ia32_expandqi128_mask:
  case X86::BI__builtin_ia32_expandqi256_mask:
  case X86::BI__builtin_ia32_expandqi512_mask: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    return emitX86CompressExpand(builder, loc, ops[0], ops[1], ops[2],
                                 "x86.avx512.mask.expand");
  }
  case X86::BI__builtin_ia32_compressdf128_mask:
  case X86::BI__builtin_ia32_compressdf256_mask:
  case X86::BI__builtin_ia32_compressdf512_mask:
  case X86::BI__builtin_ia32_compresssf128_mask:
  case X86::BI__builtin_ia32_compresssf256_mask:
  case X86::BI__builtin_ia32_compresssf512_mask:
  case X86::BI__builtin_ia32_compressdi128_mask:
  case X86::BI__builtin_ia32_compressdi256_mask:
  case X86::BI__builtin_ia32_compressdi512_mask:
  case X86::BI__builtin_ia32_compresssi128_mask:
  case X86::BI__builtin_ia32_compresssi256_mask:
  case X86::BI__builtin_ia32_compresssi512_mask:
  case X86::BI__builtin_ia32_compresshi128_mask:
  case X86::BI__builtin_ia32_compresshi256_mask:
  case X86::BI__builtin_ia32_compresshi512_mask:
  case X86::BI__builtin_ia32_compressqi128_mask:
  case X86::BI__builtin_ia32_compressqi256_mask:
  case X86::BI__builtin_ia32_compressqi512_mask: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    return emitX86CompressExpand(builder, loc, ops[0], ops[1], ops[2],
                                 "x86.avx512.mask.compress");
  }
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si: {
    StringRef intrinsicName;
    switch (builtinID) {
    default:
      llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_gather3div2df:
      intrinsicName = "x86.avx512.mask.gather3div2.df";
      break;
    case X86::BI__builtin_ia32_gather3div2di:
      intrinsicName = "x86.avx512.mask.gather3div2.di";
      break;
    case X86::BI__builtin_ia32_gather3div4df:
      intrinsicName = "x86.avx512.mask.gather3div4.df";
      break;
    case X86::BI__builtin_ia32_gather3div4di:
      intrinsicName = "x86.avx512.mask.gather3div4.di";
      break;
    case X86::BI__builtin_ia32_gather3div4sf:
      intrinsicName = "x86.avx512.mask.gather3div4.sf";
      break;
    case X86::BI__builtin_ia32_gather3div4si:
      intrinsicName = "x86.avx512.mask.gather3div4.si";
      break;
    case X86::BI__builtin_ia32_gather3div8sf:
      intrinsicName = "x86.avx512.mask.gather3div8.sf";
      break;
    case X86::BI__builtin_ia32_gather3div8si:
      intrinsicName = "x86.avx512.mask.gather3div8.si";
      break;
    case X86::BI__builtin_ia32_gather3siv2df:
      intrinsicName = "x86.avx512.mask.gather3siv2.df";
      break;
    case X86::BI__builtin_ia32_gather3siv2di:
      intrinsicName = "x86.avx512.mask.gather3siv2.di";
      break;
    case X86::BI__builtin_ia32_gather3siv4df:
      intrinsicName = "x86.avx512.mask.gather3siv4.df";
      break;
    case X86::BI__builtin_ia32_gather3siv4di:
      intrinsicName = "x86.avx512.mask.gather3siv4.di";
      break;
    case X86::BI__builtin_ia32_gather3siv4sf:
      intrinsicName = "x86.avx512.mask.gather3siv4.sf";
      break;
    case X86::BI__builtin_ia32_gather3siv4si:
      intrinsicName = "x86.avx512.mask.gather3siv4.si";
      break;
    case X86::BI__builtin_ia32_gather3siv8sf:
      intrinsicName = "x86.avx512.mask.gather3siv8.sf";
      break;
    case X86::BI__builtin_ia32_gather3siv8si:
      intrinsicName = "x86.avx512.mask.gather3siv8.si";
      break;
    case X86::BI__builtin_ia32_gathersiv8df:
      intrinsicName = "x86.avx512.mask.gather.dpd.512";
      break;
    case X86::BI__builtin_ia32_gathersiv16sf:
      intrinsicName = "x86.avx512.mask.gather.dps.512";
      break;
    case X86::BI__builtin_ia32_gatherdiv8df:
      intrinsicName = "x86.avx512.mask.gather.qpd.512";
      break;
    case X86::BI__builtin_ia32_gatherdiv16sf:
      intrinsicName = "x86.avx512.mask.gather.qps.512";
      break;
    case X86::BI__builtin_ia32_gathersiv8di:
      intrinsicName = "x86.avx512.mask.gather.dpq.512";
      break;
    case X86::BI__builtin_ia32_gathersiv16si:
      intrinsicName = "x86.avx512.mask.gather.dpi.512";
      break;
    case X86::BI__builtin_ia32_gatherdiv8di:
      intrinsicName = "x86.avx512.mask.gather.qpq.512";
      break;
    case X86::BI__builtin_ia32_gatherdiv16si:
      intrinsicName = "x86.avx512.mask.gather.qpi.512";
      break;
    }

    mlir::Location loc = getLoc(expr->getExprLoc());
    unsigned minElts =
        std::min(cast<cir::VectorType>(ops[0].getType()).getSize(),
                 cast<cir::VectorType>(ops[2].getType()).getSize());
    ops[3] = getMaskVecValue(builder, loc, ops[3], minElts);
    return builder.emitIntrinsicCallOp(loc, intrinsicName,
                                       convertType(expr->getType()), ops);
  }
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si: {
    llvm::StringRef intrinsicName;
    switch (builtinID) {
    default:
      llvm_unreachable("Unexpected builtin");
    case X86::BI__builtin_ia32_scattersiv8df:
      intrinsicName = "x86.avx512.mask.scatter.dpd.512";
      break;
    case X86::BI__builtin_ia32_scattersiv16sf:
      intrinsicName = "x86.avx512.mask.scatter.dps.512";
      break;
    case X86::BI__builtin_ia32_scatterdiv8df:
      intrinsicName = "x86.avx512.mask.scatter.qpd.512";
      break;
    case X86::BI__builtin_ia32_scatterdiv16sf:
      intrinsicName = "x86.avx512.mask.scatter.qps.512";
      break;
    case X86::BI__builtin_ia32_scattersiv8di:
      intrinsicName = "x86.avx512.mask.scatter.dpq.512";
      break;
    case X86::BI__builtin_ia32_scattersiv16si:
      intrinsicName = "x86.avx512.mask.scatter.dpi.512";
      break;
    case X86::BI__builtin_ia32_scatterdiv8di:
      intrinsicName = "x86.avx512.mask.scatter.qpq.512";
      break;
    case X86::BI__builtin_ia32_scatterdiv16si:
      intrinsicName = "x86.avx512.mask.scatter.qpi.512";
      break;
    case X86::BI__builtin_ia32_scatterdiv2df:
      intrinsicName = "x86.avx512.mask.scatterdiv2.df";
      break;
    case X86::BI__builtin_ia32_scatterdiv2di:
      intrinsicName = "x86.avx512.mask.scatterdiv2.di";
      break;
    case X86::BI__builtin_ia32_scatterdiv4df:
      intrinsicName = "x86.avx512.mask.scatterdiv4.df";
      break;
    case X86::BI__builtin_ia32_scatterdiv4di:
      intrinsicName = "x86.avx512.mask.scatterdiv4.di";
      break;
    case X86::BI__builtin_ia32_scatterdiv4sf:
      intrinsicName = "x86.avx512.mask.scatterdiv4.sf";
      break;
    case X86::BI__builtin_ia32_scatterdiv4si:
      intrinsicName = "x86.avx512.mask.scatterdiv4.si";
      break;
    case X86::BI__builtin_ia32_scatterdiv8sf:
      intrinsicName = "x86.avx512.mask.scatterdiv8.sf";
      break;
    case X86::BI__builtin_ia32_scatterdiv8si:
      intrinsicName = "x86.avx512.mask.scatterdiv8.si";
      break;
    case X86::BI__builtin_ia32_scattersiv2df:
      intrinsicName = "x86.avx512.mask.scattersiv2.df";
      break;
    case X86::BI__builtin_ia32_scattersiv2di:
      intrinsicName = "x86.avx512.mask.scattersiv2.di";
      break;
    case X86::BI__builtin_ia32_scattersiv4df:
      intrinsicName = "x86.avx512.mask.scattersiv4.df";
      break;
    case X86::BI__builtin_ia32_scattersiv4di:
      intrinsicName = "x86.avx512.mask.scattersiv4.di";
      break;
    case X86::BI__builtin_ia32_scattersiv4sf:
      intrinsicName = "x86.avx512.mask.scattersiv4.sf";
      break;
    case X86::BI__builtin_ia32_scattersiv4si:
      intrinsicName = "x86.avx512.mask.scattersiv4.si";
      break;
    case X86::BI__builtin_ia32_scattersiv8sf:
      intrinsicName = "x86.avx512.mask.scattersiv8.sf";
      break;
    case X86::BI__builtin_ia32_scattersiv8si:
      intrinsicName = "x86.avx512.mask.scattersiv8.si";
      break;
    }

    mlir::Location loc = getLoc(expr->getExprLoc());
    unsigned minElts =
        std::min(cast<cir::VectorType>(ops[2].getType()).getSize(),
                 cast<cir::VectorType>(ops[3].getType()).getSize());
    ops[1] = getMaskVecValue(builder, loc, ops[1], minElts);

    return builder.emitIntrinsicCallOp(loc, intrinsicName,
                                       convertType(expr->getType()), ops);
  }
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask: {
    mlir::Location loc = getLoc(expr->getExprLoc());
    cir::VectorType dstTy = cast<cir::VectorType>(convertType(expr->getType()));
    unsigned numElts = dstTy.getSize();
    unsigned srcNumElts = cast<cir::VectorType>(ops[0].getType()).getSize();
    unsigned subVectors = srcNumElts / numElts;
    assert(llvm::isPowerOf2_32(subVectors) && "Expected power of 2 subvectors");
    unsigned index =
        ops[1].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue();

    index &= subVectors - 1; // Remove any extra bits.
    index *= numElts;

    int64_t indices[16];
    std::iota(indices, indices + numElts, index);

    mlir::Value poison =
        builder.getConstant(loc, cir::PoisonAttr::get(ops[0].getType()));
    mlir::Value res = builder.createVecShuffle(loc, ops[0], poison,
                                               ArrayRef(indices, numElts));
    if (ops.size() == 4)
      res = emitX86Select(builder, loc, ops[3], res, ops[2]);

    return res;
  }
1461 case X86::BI__builtin_ia32_vinsertf128_pd256:
1462 case X86::BI__builtin_ia32_vinsertf128_ps256:
1463 case X86::BI__builtin_ia32_vinsertf128_si256:
1464 case X86::BI__builtin_ia32_insert128i256:
1465 case X86::BI__builtin_ia32_insertf64x4:
1466 case X86::BI__builtin_ia32_insertf32x4:
1467 case X86::BI__builtin_ia32_inserti64x4:
1468 case X86::BI__builtin_ia32_inserti32x4:
1469 case X86::BI__builtin_ia32_insertf32x8:
1470 case X86::BI__builtin_ia32_inserti32x8:
1471 case X86::BI__builtin_ia32_insertf32x4_256:
1472 case X86::BI__builtin_ia32_inserti32x4_256:
1473 case X86::BI__builtin_ia32_insertf64x2_256:
1474 case X86::BI__builtin_ia32_inserti64x2_256:
1475 case X86::BI__builtin_ia32_insertf64x2_512:
1476 case X86::BI__builtin_ia32_inserti64x2_512: {
1477 unsigned dstNumElts = cast<cir::VectorType>(ops[0].getType()).getSize();
1478 unsigned srcNumElts = cast<cir::VectorType>(ops[1].getType()).getSize();
1479 unsigned subVectors = dstNumElts / srcNumElts;
1480 assert(llvm::isPowerOf2_32(subVectors) && "Expected power of 2 subvectors");
1481 assert(dstNumElts <= 16);
1482
1483 uint64_t index = getZExtIntValueFromConstOp(ops[2]);
1484 index &= subVectors - 1; // Remove any extra bits.
1485 index *= srcNumElts;
1486
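// First widen the source vector to the destination width; mask entries at or
// past srcNumElts refer to the shuffle's (unused) second operand and are
// never read by the blend below.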
1487 llvm::SmallVector<int64_t, 16> mask(dstNumElts);
1488 for (unsigned i = 0; i != dstNumElts; ++i)
1489 mask[i] = (i >= srcNumElts) ? srcNumElts + (i % srcNumElts) : i;
1490
1491 mlir::Value op1 =
1492 builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[1], mask);
1493
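// Then blend the widened source over ops[0], remapping the srcNumElts
// elements that start at the selected sub-vector index.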
1494 for (unsigned i = 0; i != dstNumElts; ++i) {
1495 if (i >= index && i < (index + srcNumElts))
1496 mask[i] = (i - index) + dstNumElts;
1497 else
1498 mask[i] = i;
1499 }
1500
1501 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[0], op1,
1502 mask);
1503 }
1504 case X86::BI__builtin_ia32_pmovqd512_mask:
1505 case X86::BI__builtin_ia32_pmovwb512_mask: {
1506 mlir::Value res =
1507 builder.createIntCast(ops[0], cast<cir::VectorType>(ops[1].getType()));
1508 return emitX86Select(builder, getLoc(expr->getExprLoc()), ops[2], res,
1509 ops[1]);
1510 }
1511 case X86::BI__builtin_ia32_pblendw128:
1512 case X86::BI__builtin_ia32_blendpd:
1513 case X86::BI__builtin_ia32_blendps:
1514 case X86::BI__builtin_ia32_blendpd256:
1515 case X86::BI__builtin_ia32_blendps256:
1516 case X86::BI__builtin_ia32_pblendw256:
1517 case X86::BI__builtin_ia32_pblendd128:
1518 case X86::BI__builtin_ia32_pblendd256: {
1519 uint32_t imm = getZExtIntValueFromConstOp(ops[2]);
1520 unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
1521
1522 llvm::SmallVector<mlir::Attribute, 16> indices;
1523 // If there are more than 8 elements, the immediate is used twice, so make
1524 // sure we handle that.
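// Illustrative example (not part of the original source): for pblendw128
// (numElts = 8) with imm = 0x0F this yields indices = {8, 9, 10, 11, 4, 5,
// 6, 7}, i.e. the low four words come from ops[1] and the high four from
// ops[0]; for pblendw256 (numElts = 16) the same eight bits drive both
// 128-bit halves.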
1525 mlir::Type i32Ty = builder.getSInt32Ty();
1526 for (unsigned i = 0; i != numElts; ++i)
1527 indices.push_back(
1528 cir::IntAttr::get(i32Ty, ((imm >> (i % 8)) & 0x1) ? numElts + i : i));
1529
1530 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[0], ops[1],
1531 indices);
1532 }
1533 case X86::BI__builtin_ia32_pshuflw:
1534 case X86::BI__builtin_ia32_pshuflw256:
1535 case X86::BI__builtin_ia32_pshuflw512:
1536 return emitPshufWord(builder, ops[0], ops[1], getLoc(expr->getExprLoc()),
1537 true);
1538 case X86::BI__builtin_ia32_pshufhw:
1539 case X86::BI__builtin_ia32_pshufhw256:
1540 case X86::BI__builtin_ia32_pshufhw512:
1541 return emitPshufWord(builder, ops[0], ops[1], getLoc(expr->getExprLoc()),
1542 false);
1543 case X86::BI__builtin_ia32_pshufd:
1544 case X86::BI__builtin_ia32_pshufd256:
1545 case X86::BI__builtin_ia32_pshufd512:
1546 case X86::BI__builtin_ia32_vpermilpd:
1547 case X86::BI__builtin_ia32_vpermilps:
1548 case X86::BI__builtin_ia32_vpermilpd256:
1549 case X86::BI__builtin_ia32_vpermilps256:
1550 case X86::BI__builtin_ia32_vpermilpd512:
1551 case X86::BI__builtin_ia32_vpermilps512: {
1552 const uint32_t imm = getSExtIntValueFromConstOp(ops[1]);
1553
1554 llvm::SmallVector<int64_t, 16> mask;
1555 computeFullLaneShuffleMask(*this, ops[0], imm, false, mask);
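// Illustrative example (not part of the original source): assuming
// computeFullLaneShuffleMask expands the four 2-bit fields of the immediate
// within each 128-bit lane, pshufd with imm = 0x1B (0b00011011) reverses
// every lane: mask = {3, 2, 1, 0} repeated per lane.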
1556
1557 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[0], mask);
1558 }
1559 case X86::BI__builtin_ia32_shufpd:
1560 case X86::BI__builtin_ia32_shufpd256:
1561 case X86::BI__builtin_ia32_shufpd512:
1562 case X86::BI__builtin_ia32_shufps:
1563 case X86::BI__builtin_ia32_shufps256:
1564 case X86::BI__builtin_ia32_shufps512: {
1565 const uint32_t imm = getZExtIntValueFromConstOp(ops[2]);
1566
1567 llvm::SmallVector<int64_t, 16> mask;
1568 computeFullLaneShuffleMask(*this, ops[0], imm, true, mask);
1569
1570 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[0], ops[1],
1571 mask);
1572 }
1573 case X86::BI__builtin_ia32_permdi256:
1574 case X86::BI__builtin_ia32_permdf256:
1575 case X86::BI__builtin_ia32_permdi512:
1576 case X86::BI__builtin_ia32_permdf512: {
1577 unsigned imm =
1578 ops[1].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue();
1579 unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
1580
1581 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
1582 int64_t indices[8];
1583
1584 for (unsigned l = 0; l != numElts; l += 4)
1585 for (unsigned i = 0; i != 4; ++i)
1586 indices[l + i] = l + ((imm >> (2 * i)) & 0x3);
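// Illustrative example (not part of the original source): for permdi256
// (numElts = 4) with imm = 0x1B this gives indices = {3, 2, 1, 0}, reversing
// the four quadwords; the 512-bit forms apply the same immediate to each
// 256-bit lane.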
1587
1588 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[0],
1589 ArrayRef(indices, numElts));
1590 }
1591 case X86::BI__builtin_ia32_palignr128:
1592 case X86::BI__builtin_ia32_palignr256:
1593 case X86::BI__builtin_ia32_palignr512: {
1594 uint32_t shiftVal = getZExtIntValueFromConstOp(ops[2]) & 0xff;
1595
1596 unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
1597 assert(numElts % 16 == 0);
1598
1599 // If palignr is shifting the pair of vectors more than the size of two
1600 // lanes, emit zero.
1601 if (shiftVal >= 32)
1602 return builder.getNullValue(convertType(expr->getType()),
1603 getLoc(expr->getExprLoc()));
1604
1605 // If palignr is shifting the pair of input vectors more than one lane,
1606 // but less than two lanes, convert to shifting in zeroes.
1607 if (shiftVal > 16) {
1608 shiftVal -= 16;
1609 ops[1] = ops[0];
1610 ops[0] =
1611 builder.getNullValue(ops[0].getType(), getLoc(expr->getExprLoc()));
1612 }
1613
1614 int64_t indices[64];
1615 // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
1616 for (unsigned l = 0; l != numElts; l += 16) {
1617 for (unsigned i = 0; i != 16; ++i) {
1618 uint32_t idx = shiftVal + i;
1619 if (idx >= 16)
1620 idx += numElts - 16; // End of lane, switch operand.
1621 indices[l + i] = l + idx;
1622 }
1623 }
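// Illustrative example (not part of the original source): for 128-bit
// palignr (numElts = 16) with shiftVal = 4, indices = {4, ..., 15, 16, 17,
// 18, 19}: bytes 4-15 of ops[1] followed by bytes 0-3 of ops[0], i.e. the
// concatenation ops[0]:ops[1] shifted right by four bytes.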
1624
1625 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[1], ops[0],
1626 ArrayRef(indices, numElts));
1627 }
1628 case X86::BI__builtin_ia32_alignd128:
1629 case X86::BI__builtin_ia32_alignd256:
1630 case X86::BI__builtin_ia32_alignd512:
1631 case X86::BI__builtin_ia32_alignq128:
1632 case X86::BI__builtin_ia32_alignq256:
1633 case X86::BI__builtin_ia32_alignq512: {
1634 unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
1635 unsigned shiftVal =
1636 ops[2].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue() &
1637 0xff;
1638
1639 // Mask the shift amount to the width of the vector.
1640 shiftVal &= numElts - 1;
1641
1642 llvm::SmallVector<mlir::Attribute, 16> indices;
1643 mlir::Type i32Ty = builder.getSInt32Ty();
1644 for (unsigned i = 0; i != numElts; ++i)
1645 indices.push_back(cir::IntAttr::get(i32Ty, i + shiftVal));
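// Illustrative example (not part of the original source): for alignd128
// (numElts = 4) with shiftVal = 1, indices = {1, 2, 3, 4}: elements 1-3 of
// the low source followed by element 0 of the high source, i.e. the
// concatenation ops[0]:ops[1] shifted right by one 32-bit element.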
1646
1647 return builder.createVecShuffle(getLoc(expr->getExprLoc()), ops[1], ops[0],
1648 indices);
1649 }
1650 case X86::BI__builtin_ia32_shuf_f32x4_256:
1651 case X86::BI__builtin_ia32_shuf_f64x2_256:
1652 case X86::BI__builtin_ia32_shuf_i32x4_256:
1653 case X86::BI__builtin_ia32_shuf_i64x2_256:
1654 case X86::BI__builtin_ia32_shuf_f32x4:
1655 case X86::BI__builtin_ia32_shuf_f64x2:
1656 case X86::BI__builtin_ia32_shuf_i32x4:
1657 case X86::BI__builtin_ia32_shuf_i64x2: {
1658 mlir::Value src1 = ops[0];
1659 mlir::Value src2 = ops[1];
1660
1661 unsigned imm =
1662 ops[2].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue();
1663
1664 unsigned numElems = cast<cir::VectorType>(src1.getType()).getSize();
1665 unsigned totalBits = getContext().getTypeSize(expr->getArg(0)->getType());
1666 unsigned numLanes = totalBits == 512 ? 4 : 2;
1667 unsigned numElemsPerLane = numElems / numLanes;
1668
1669 llvm::SmallVector<mlir::Attribute, 16> indices;
1670 mlir::Type i32Ty = builder.getSInt32Ty();
1671
1672 for (unsigned l = 0; l != numElems; l += numElemsPerLane) {
1673 unsigned index = (imm % numLanes) * numElemsPerLane;
1674 imm /= numLanes;
1675 if (l >= (numElems / 2))
1676 index += numElems;
1677 for (unsigned i = 0; i != numElemsPerLane; ++i) {
1678 indices.push_back(cir::IntAttr::get(i32Ty, index + i));
1679 }
1680 }
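// Illustrative example (not part of the original source): for shuf_f64x2_256
// (numElems = 4, numLanes = 2) with imm = 0b10 this produces indices =
// {0, 1, 6, 7}: lane 0 of src1 followed by lane 1 of src2.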
1681
1682 return builder.createVecShuffle(getLoc(expr->getExprLoc()), src1, src2,
1683 indices);
1684 }
1685 case X86::BI__builtin_ia32_vperm2f128_pd256:
1686 case X86::BI__builtin_ia32_vperm2f128_ps256:
1687 case X86::BI__builtin_ia32_vperm2f128_si256:
1688 case X86::BI__builtin_ia32_permti256:
1689 case X86::BI__builtin_ia32_pslldqi128_byteshift:
1690 case X86::BI__builtin_ia32_pslldqi256_byteshift:
1691 case X86::BI__builtin_ia32_pslldqi512_byteshift:
1692 case X86::BI__builtin_ia32_psrldqi128_byteshift:
1693 case X86::BI__builtin_ia32_psrldqi256_byteshift:
1694 case X86::BI__builtin_ia32_psrldqi512_byteshift:
1695 cgm.errorNYI(expr->getSourceRange(),
1696 std::string("unimplemented X86 builtin call: ") +
1697 getContext().BuiltinInfo.getName(builtinID));
1698 return mlir::Value{};
1699 case X86::BI__builtin_ia32_kshiftliqi:
1700 case X86::BI__builtin_ia32_kshiftlihi:
1701 case X86::BI__builtin_ia32_kshiftlisi:
1702 case X86::BI__builtin_ia32_kshiftlidi: {
1703 mlir::Location loc = getLoc(expr->getExprLoc());
1704 unsigned shiftVal =
1705 ops[1].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue() &
1706 0xff;
1707 unsigned numElems = cast<cir::IntType>(ops[0].getType()).getWidth();
1708
1709 if (shiftVal >= numElems)
1710 return builder.getNullValue(ops[0].getType(), loc);
1711
1712 mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems);
1713
1714 llvm::SmallVector<mlir::Attribute, 16> indices;
1715 mlir::Type i32Ty = builder.getSInt32Ty();
1716 for (auto i : llvm::seq<unsigned>(0, numElems))
1717 indices.push_back(cir::IntAttr::get(i32Ty, numElems + i - shiftVal));
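// Illustrative example (not part of the original source): for kshiftliqi
// (numElems = 8) with shiftVal = 2, indices = {6, 7, 8, ..., 13}; entries
// below numElems select zeros, giving {0, 0, in[0], ..., in[5]}, i.e. the
// mask shifted left by two with zeros shifted in.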
1718
1719 mlir::Value zero = builder.getNullValue(in.getType(), loc);
1720 mlir::Value sv = builder.createVecShuffle(loc, zero, in, indices);
1721 return builder.createBitcast(sv, ops[0].getType());
1722 }
1723 case X86::BI__builtin_ia32_kshiftriqi:
1724 case X86::BI__builtin_ia32_kshiftrihi:
1725 case X86::BI__builtin_ia32_kshiftrisi:
1726 case X86::BI__builtin_ia32_kshiftridi: {
1727 mlir::Location loc = getLoc(expr->getExprLoc());
1728 unsigned shiftVal =
1729 ops[1].getDefiningOp<cir::ConstantOp>().getIntValue().getZExtValue() &
1730 0xff;
1731 unsigned numElems = cast<cir::IntType>(ops[0].getType()).getWidth();
1732
1733 if (shiftVal >= numElems)
1734 return builder.getNullValue(ops[0].getType(), loc);
1735
1736 mlir::Value in = getMaskVecValue(builder, loc, ops[0], numElems);
1737
1738 llvm::SmallVector<mlir::Attribute, 16> indices;
1739 mlir::Type i32Ty = builder.getSInt32Ty();
1740 for (auto i : llvm::seq<unsigned>(0, numElems))
1741 indices.push_back(cir::IntAttr::get(i32Ty, i + shiftVal));
1742
1743 mlir::Value zero = builder.getNullValue(in.getType(), loc);
1744 mlir::Value sv = builder.createVecShuffle(loc, in, zero, indices);
1745 return builder.createBitcast(sv, ops[0].getType());
1746 }
1747 case X86::BI__builtin_ia32_vprotbi:
1748 case X86::BI__builtin_ia32_vprotwi:
1749 case X86::BI__builtin_ia32_vprotdi:
1750 case X86::BI__builtin_ia32_vprotqi:
1751 case X86::BI__builtin_ia32_prold128:
1752 case X86::BI__builtin_ia32_prold256:
1753 case X86::BI__builtin_ia32_prold512:
1754 case X86::BI__builtin_ia32_prolq128:
1755 case X86::BI__builtin_ia32_prolq256:
1756 case X86::BI__builtin_ia32_prolq512:
1757 return emitX86FunnelShift(builder, getLoc(expr->getExprLoc()), ops[0],
1758 ops[0], ops[1], false);
1759 case X86::BI__builtin_ia32_prord128:
1760 case X86::BI__builtin_ia32_prord256:
1761 case X86::BI__builtin_ia32_prord512:
1762 case X86::BI__builtin_ia32_prorq128:
1763 case X86::BI__builtin_ia32_prorq256:
1764 case X86::BI__builtin_ia32_prorq512:
1765 return emitX86FunnelShift(builder, getLoc(expr->getExprLoc()), ops[0],
1766 ops[0], ops[1], true);
1767 case X86::BI__builtin_ia32_selectb_128:
1768 case X86::BI__builtin_ia32_selectb_256:
1769 case X86::BI__builtin_ia32_selectb_512:
1770 case X86::BI__builtin_ia32_selectw_128:
1771 case X86::BI__builtin_ia32_selectw_256:
1772 case X86::BI__builtin_ia32_selectw_512:
1773 case X86::BI__builtin_ia32_selectd_128:
1774 case X86::BI__builtin_ia32_selectd_256:
1775 case X86::BI__builtin_ia32_selectd_512:
1776 case X86::BI__builtin_ia32_selectq_128:
1777 case X86::BI__builtin_ia32_selectq_256:
1778 case X86::BI__builtin_ia32_selectq_512:
1779 case X86::BI__builtin_ia32_selectph_128:
1780 case X86::BI__builtin_ia32_selectph_256:
1781 case X86::BI__builtin_ia32_selectph_512:
1782 case X86::BI__builtin_ia32_selectpbf_128:
1783 case X86::BI__builtin_ia32_selectpbf_256:
1784 case X86::BI__builtin_ia32_selectpbf_512:
1785 case X86::BI__builtin_ia32_selectps_128:
1786 case X86::BI__builtin_ia32_selectps_256:
1787 case X86::BI__builtin_ia32_selectps_512:
1788 case X86::BI__builtin_ia32_selectpd_128:
1789 case X86::BI__builtin_ia32_selectpd_256:
1790 case X86::BI__builtin_ia32_selectpd_512:
1791 case X86::BI__builtin_ia32_selectsh_128:
1792 case X86::BI__builtin_ia32_selectsbf_128:
1793 case X86::BI__builtin_ia32_selectss_128:
1794 case X86::BI__builtin_ia32_selectsd_128:
1795 case X86::BI__builtin_ia32_cmpb128_mask:
1796 case X86::BI__builtin_ia32_cmpb256_mask:
1797 case X86::BI__builtin_ia32_cmpb512_mask:
1798 case X86::BI__builtin_ia32_cmpw128_mask:
1799 case X86::BI__builtin_ia32_cmpw256_mask:
1800 case X86::BI__builtin_ia32_cmpw512_mask:
1801 case X86::BI__builtin_ia32_cmpd128_mask:
1802 case X86::BI__builtin_ia32_cmpd256_mask:
1803 case X86::BI__builtin_ia32_cmpd512_mask:
1804 case X86::BI__builtin_ia32_cmpq128_mask:
1805 case X86::BI__builtin_ia32_cmpq256_mask:
1806 case X86::BI__builtin_ia32_cmpq512_mask:
1807 case X86::BI__builtin_ia32_ucmpb128_mask:
1808 case X86::BI__builtin_ia32_ucmpb256_mask:
1809 case X86::BI__builtin_ia32_ucmpb512_mask:
1810 case X86::BI__builtin_ia32_ucmpw128_mask:
1811 case X86::BI__builtin_ia32_ucmpw256_mask:
1812 case X86::BI__builtin_ia32_ucmpw512_mask:
1813 case X86::BI__builtin_ia32_ucmpd128_mask:
1814 case X86::BI__builtin_ia32_ucmpd256_mask:
1815 case X86::BI__builtin_ia32_ucmpd512_mask:
1816 case X86::BI__builtin_ia32_ucmpq128_mask:
1817 case X86::BI__builtin_ia32_ucmpq256_mask:
1818 case X86::BI__builtin_ia32_ucmpq512_mask:
1819 cgm.errorNYI(expr->getSourceRange(),
1820 std::string("unimplemented X86 builtin call: ") +
1821 getContext().BuiltinInfo.getName(builtinID));
1822 return mlir::Value{};
1823 case X86::BI__builtin_ia32_vpcomb:
1824 case X86::BI__builtin_ia32_vpcomw:
1825 case X86::BI__builtin_ia32_vpcomd:
1826 case X86::BI__builtin_ia32_vpcomq:
1827 return emitX86vpcom(builder, getLoc(expr->getExprLoc()), ops, true);
1828 case X86::BI__builtin_ia32_vpcomub:
1829 case X86::BI__builtin_ia32_vpcomuw:
1830 case X86::BI__builtin_ia32_vpcomud:
1831 case X86::BI__builtin_ia32_vpcomuq:
1832 return emitX86vpcom(builder, getLoc(expr->getExprLoc()), ops, false);
1833 case X86::BI__builtin_ia32_kortestcqi:
1834 case X86::BI__builtin_ia32_kortestchi:
1835 case X86::BI__builtin_ia32_kortestcsi:
1836 case X86::BI__builtin_ia32_kortestcdi: {
1837 mlir::Location loc = getLoc(expr->getExprLoc());
1838 cir::IntType ty = cast<cir::IntType>(ops[0].getType());
1839 mlir::Value allOnesOp =
1840 builder.getConstAPInt(loc, ty, APInt::getAllOnes(ty.getWidth()));
1841 mlir::Value orOp = emitX86MaskLogic(builder, loc, cir::BinOpKind::Or, ops);
1842 mlir::Value cmp =
1843 cir::CmpOp::create(builder, loc, cir::CmpOpKind::eq, orOp, allOnesOp);
1844 return builder.createCast(cir::CastKind::bool_to_int, cmp,
1845 cgm.convertType(expr->getType()));
1846 }
1847 case X86::BI__builtin_ia32_kortestzqi:
1848 case X86::BI__builtin_ia32_kortestzhi:
1849 case X86::BI__builtin_ia32_kortestzsi:
1850 case X86::BI__builtin_ia32_kortestzdi: {
1851 mlir::Location loc = getLoc(expr->getExprLoc());
1852 cir::IntType ty = cast<cir::IntType>(ops[0].getType());
1853 mlir::Value allZerosOp = builder.getNullValue(ty, loc).getResult();
1854 mlir::Value orOp = emitX86MaskLogic(builder, loc, cir::BinOpKind::Or, ops);
1855 mlir::Value cmp =
1856 cir::CmpOp::create(builder, loc, cir::CmpOpKind::eq, orOp, allZerosOp);
1857 return builder.createCast(cir::CastKind::bool_to_int, cmp,
1858 cgm.convertType(expr->getType()));
1859 }
1860 case X86::BI__builtin_ia32_ktestcqi:
1861 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1862 "x86.avx512.ktestc.b", ops);
1863 case X86::BI__builtin_ia32_ktestzqi:
1864 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1865 "x86.avx512.ktestz.b", ops);
1866 case X86::BI__builtin_ia32_ktestchi:
1867 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1868 "x86.avx512.ktestc.w", ops);
1869 case X86::BI__builtin_ia32_ktestzhi:
1870 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1871 "x86.avx512.ktestz.w", ops);
1872 case X86::BI__builtin_ia32_ktestcsi:
1873 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1874 "x86.avx512.ktestc.d", ops);
1875 case X86::BI__builtin_ia32_ktestzsi:
1876 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1877 "x86.avx512.ktestz.d", ops);
1878 case X86::BI__builtin_ia32_ktestcdi:
1879 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1880 "x86.avx512.ktestc.q", ops);
1881 case X86::BI__builtin_ia32_ktestzdi:
1882 return emitX86MaskTest(builder, getLoc(expr->getExprLoc()),
1883 "x86.avx512.ktestz.q", ops);
1884 case X86::BI__builtin_ia32_kaddqi:
1885 return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
1886 "x86.avx512.kadd.b", ops);
1887 case X86::BI__builtin_ia32_kaddhi:
1888 return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
1889 "x86.avx512.kadd.w", ops);
1890 case X86::BI__builtin_ia32_kaddsi:
1891 return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
1892 "x86.avx512.kadd.d", ops);
1893 case X86::BI__builtin_ia32_kadddi:
1894 return emitX86MaskAddLogic(builder, getLoc(expr->getExprLoc()),
1895 "x86.avx512.kadd.q", ops);
1896 case X86::BI__builtin_ia32_kandqi:
1897 case X86::BI__builtin_ia32_kandhi:
1898 case X86::BI__builtin_ia32_kandsi:
1899 case X86::BI__builtin_ia32_kanddi:
1900 return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
1901 cir::BinOpKind::And, ops);
1902 case X86::BI__builtin_ia32_kandnqi:
1903 case X86::BI__builtin_ia32_kandnhi:
1904 case X86::BI__builtin_ia32_kandnsi:
1905 case X86::BI__builtin_ia32_kandndi:
1906 return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
1907 cir::BinOpKind::And, ops, true);
1908 case X86::BI__builtin_ia32_korqi:
1909 case X86::BI__builtin_ia32_korhi:
1910 case X86::BI__builtin_ia32_korsi:
1911 case X86::BI__builtin_ia32_kordi:
1912 return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
1913 cir::BinOpKind::Or, ops);
1914 case X86::BI__builtin_ia32_kxnorqi:
1915 case X86::BI__builtin_ia32_kxnorhi:
1916 case X86::BI__builtin_ia32_kxnorsi:
1917 case X86::BI__builtin_ia32_kxnordi:
1918 return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
1919 cir::BinOpKind::Xor, ops, true);
1920 case X86::BI__builtin_ia32_kxorqi:
1921 case X86::BI__builtin_ia32_kxorhi:
1922 case X86::BI__builtin_ia32_kxorsi:
1923 case X86::BI__builtin_ia32_kxordi:
1924 return emitX86MaskLogic(builder, getLoc(expr->getExprLoc()),
1925 cir::BinOpKind::Xor, ops);
1926 case X86::BI__builtin_ia32_knotqi:
1927 case X86::BI__builtin_ia32_knothi:
1928 case X86::BI__builtin_ia32_knotsi:
1929 case X86::BI__builtin_ia32_knotdi: {
1930 cir::IntType intTy = cast<cir::IntType>(ops[0].getType());
1931 unsigned numElts = intTy.getWidth();
1932 mlir::Value resVec =
1933 getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts);
1934 return builder.createBitcast(builder.createNot(resVec), ops[0].getType());
1935 }
1936 case X86::BI__builtin_ia32_kmovb:
1937 case X86::BI__builtin_ia32_kmovw:
1938 case X86::BI__builtin_ia32_kmovd:
1939 case X86::BI__builtin_ia32_kmovq: {
1940 // Bitcast to vXi1 type and then back to integer. This gets the mask
1941 // register type into the IR, but might be optimized out depending on
1942 // what's around it.
1943 cir::IntType intTy = cast<cir::IntType>(ops[0].getType());
1944 unsigned numElts = intTy.getWidth();
1945 mlir::Value resVec =
1946 getMaskVecValue(builder, getLoc(expr->getExprLoc()), ops[0], numElts);
1947 return builder.createBitcast(resVec, ops[0].getType());
1948 }
1949 case X86::BI__builtin_ia32_sqrtsh_round_mask:
1950 case X86::BI__builtin_ia32_sqrtsd_round_mask:
1951 case X86::BI__builtin_ia32_sqrtss_round_mask:
1952 cgm.errorNYI(expr->getSourceRange(),
1953 std::string("unimplemented X86 builtin call: ") +
1954 getContext().BuiltinInfo.getName(builtinID));
1955 return mlir::Value{};
1956 case X86::BI__builtin_ia32_sqrtph512:
1957 case X86::BI__builtin_ia32_sqrtps512:
1958 case X86::BI__builtin_ia32_sqrtpd512: {
1959 mlir::Location loc = getLoc(expr->getExprLoc());
1960 mlir::Value arg = ops[0];
1961 return cir::SqrtOp::create(builder, loc, arg.getType(), arg).getResult();
1962 }
1963 case X86::BI__builtin_ia32_pmuludq128:
1964 case X86::BI__builtin_ia32_pmuludq256:
1965 case X86::BI__builtin_ia32_pmuludq512: {
1966 unsigned opTypePrimitiveSizeInBits =
1967 cgm.getDataLayout().getTypeSizeInBits(ops[0].getType());
1968 return emitX86Muldq(builder, getLoc(expr->getExprLoc()), /*isSigned=*/false,
1969 ops, opTypePrimitiveSizeInBits);
1970 }
1971 case X86::BI__builtin_ia32_pmuldq128:
1972 case X86::BI__builtin_ia32_pmuldq256:
1973 case X86::BI__builtin_ia32_pmuldq512: {
1974 unsigned opTypePrimitiveSizeInBits =
1975 cgm.getDataLayout().getTypeSizeInBits(ops[0].getType());
1976 return emitX86Muldq(builder, getLoc(expr->getExprLoc()), /*isSigned=*/true,
1977 ops, opTypePrimitiveSizeInBits);
1978 }
1979 case X86::BI__builtin_ia32_pternlogd512_mask:
1980 case X86::BI__builtin_ia32_pternlogq512_mask:
1981 case X86::BI__builtin_ia32_pternlogd128_mask:
1982 case X86::BI__builtin_ia32_pternlogd256_mask:
1983 case X86::BI__builtin_ia32_pternlogq128_mask:
1984 case X86::BI__builtin_ia32_pternlogq256_mask:
1985 case X86::BI__builtin_ia32_pternlogd512_maskz:
1986 case X86::BI__builtin_ia32_pternlogq512_maskz:
1987 case X86::BI__builtin_ia32_pternlogd128_maskz:
1988 case X86::BI__builtin_ia32_pternlogd256_maskz:
1989 case X86::BI__builtin_ia32_pternlogq128_maskz:
1990 case X86::BI__builtin_ia32_pternlogq256_maskz:
1991 case X86::BI__builtin_ia32_vpshldd128:
1992 case X86::BI__builtin_ia32_vpshldd256:
1993 case X86::BI__builtin_ia32_vpshldd512:
1994 case X86::BI__builtin_ia32_vpshldq128:
1995 case X86::BI__builtin_ia32_vpshldq256:
1996 case X86::BI__builtin_ia32_vpshldq512:
1997 case X86::BI__builtin_ia32_vpshldw128:
1998 case X86::BI__builtin_ia32_vpshldw256:
1999 case X86::BI__builtin_ia32_vpshldw512:
2000 case X86::BI__builtin_ia32_vpshrdd128:
2001 case X86::BI__builtin_ia32_vpshrdd256:
2002 case X86::BI__builtin_ia32_vpshrdd512:
2003 case X86::BI__builtin_ia32_vpshrdq128:
2004 case X86::BI__builtin_ia32_vpshrdq256:
2005 case X86::BI__builtin_ia32_vpshrdq512:
2006 case X86::BI__builtin_ia32_vpshrdw128:
2007 case X86::BI__builtin_ia32_vpshrdw256:
2008 case X86::BI__builtin_ia32_vpshrdw512:
2009 cgm.errorNYI(expr->getSourceRange(),
2010 std::string("unimplemented X86 builtin call: ") +
2011 getContext().BuiltinInfo.getName(builtinID));
2012 return mlir::Value{};
2013 case X86::BI__builtin_ia32_reduce_fadd_pd512:
2014 case X86::BI__builtin_ia32_reduce_fadd_ps512:
2015 case X86::BI__builtin_ia32_reduce_fadd_ph512:
2016 case X86::BI__builtin_ia32_reduce_fadd_ph256:
2017 case X86::BI__builtin_ia32_reduce_fadd_ph128: {
2018 assert(!cir::MissingFeatures::fastMathFlags());
2019 return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
2020 "vector.reduce.fadd", ops[0].getType(),
2021 mlir::ValueRange{ops[0], ops[1]});
2022 }
2023 case X86::BI__builtin_ia32_reduce_fmul_pd512:
2024 case X86::BI__builtin_ia32_reduce_fmul_ps512:
2025 case X86::BI__builtin_ia32_reduce_fmul_ph512:
2026 case X86::BI__builtin_ia32_reduce_fmul_ph256:
2027 case X86::BI__builtin_ia32_reduce_fmul_ph128: {
2028 assert(!cir::MissingFeatures::fastMathFlags());
2029 return builder.emitIntrinsicCallOp(getLoc(expr->getExprLoc()),
2030 "vector.reduce.fmul", ops[0].getType(),
2031 mlir::ValueRange{ops[0], ops[1]});
2032 }
2033 case X86::BI__builtin_ia32_reduce_fmax_pd512:
2034 case X86::BI__builtin_ia32_reduce_fmax_ps512:
2035 case X86::BI__builtin_ia32_reduce_fmax_ph512:
2036 case X86::BI__builtin_ia32_reduce_fmax_ph256:
2037 case X86::BI__builtin_ia32_reduce_fmax_ph128: {
2038 assert(!cir::MissingFeatures::fastMathFlags());
2039 cir::VectorType vecTy = cast<cir::VectorType>(ops[0].getType());
2040 return builder.emitIntrinsicCallOp(
2041 getLoc(expr->getExprLoc()), "vector.reduce.fmax",
2042 vecTy.getElementType(), mlir::ValueRange{ops[0]});
2043 }
2044 case X86::BI__builtin_ia32_reduce_fmin_pd512:
2045 case X86::BI__builtin_ia32_reduce_fmin_ps512:
2046 case X86::BI__builtin_ia32_reduce_fmin_ph512:
2047 case X86::BI__builtin_ia32_reduce_fmin_ph256:
2048 case X86::BI__builtin_ia32_reduce_fmin_ph128: {
2049 assert(!cir::MissingFeatures::fastMathFlags());
2050 cir::VectorType vecTy = cast<cir::VectorType>(ops[0].getType());
2051 return builder.emitIntrinsicCallOp(
2052 getLoc(expr->getExprLoc()), "vector.reduce.fmin",
2053 vecTy.getElementType(), mlir::ValueRange{ops[0]});
2054 }
2055 case X86::BI__builtin_ia32_rdrand16_step:
2056 case X86::BI__builtin_ia32_rdrand32_step:
2057 case X86::BI__builtin_ia32_rdrand64_step:
2058 case X86::BI__builtin_ia32_rdseed16_step:
2059 case X86::BI__builtin_ia32_rdseed32_step:
2060 case X86::BI__builtin_ia32_rdseed64_step: {
2061 llvm::StringRef intrinsicName;
2062 switch (builtinID) {
2063 default:
2064 llvm_unreachable("Unsupported intrinsic!");
2065 case X86::BI__builtin_ia32_rdrand16_step:
2066 intrinsicName = "x86.rdrand.16";
2067 break;
2068 case X86::BI__builtin_ia32_rdrand32_step:
2069 intrinsicName = "x86.rdrand.32";
2070 break;
2071 case X86::BI__builtin_ia32_rdrand64_step:
2072 intrinsicName = "x86.rdrand.64";
2073 break;
2074 case X86::BI__builtin_ia32_rdseed16_step:
2075 intrinsicName = "x86.rdseed.16";
2076 break;
2077 case X86::BI__builtin_ia32_rdseed32_step:
2078 intrinsicName = "x86.rdseed.32";
2079 break;
2080 case X86::BI__builtin_ia32_rdseed64_step:
2081 intrinsicName = "x86.rdseed.64";
2082 break;
2083 }
2084
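// Usage sketch (illustrative, not part of the original source): these
// builtins store the random value through their pointer argument and return
// the carry flag:
//   unsigned r;
//   while (!__builtin_ia32_rdrand32_step(&r))
//     ; // retry until the hardware reports success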
2085 mlir::Location loc = getLoc(expr->getExprLoc());
2086 mlir::Type randTy = cast<cir::PointerType>(ops[0].getType()).getPointee();
2087 llvm::SmallVector<mlir::Type, 2> resultTypes = {randTy,
2088 builder.getUInt32Ty()};
2089 cir::RecordType resRecord =
2090 cir::RecordType::get(&getMLIRContext(), resultTypes, false, false,
2091 cir::RecordType::RecordKind::Struct);
2092
2093 mlir::Value call =
2094 builder.emitIntrinsicCallOp(loc, intrinsicName, resRecord);
2095 mlir::Value rand =
2096 cir::ExtractMemberOp::create(builder, loc, randTy, call, 0);
2097 builder.CIRBaseBuilderTy::createStore(loc, rand, ops[0]);
2098
2099 return cir::ExtractMemberOp::create(builder, loc, builder.getUInt32Ty(),
2100 call, 1);
2101 }
2102 case X86::BI__builtin_ia32_addcarryx_u32:
2103 case X86::BI__builtin_ia32_addcarryx_u64:
2104 case X86::BI__builtin_ia32_subborrow_u32:
2105 case X86::BI__builtin_ia32_subborrow_u64:
2106 cgm.errorNYI(expr->getSourceRange(),
2107 std::string("unimplemented X86 builtin call: ") +
2108 getContext().BuiltinInfo.getName(builtinID));
2109 return mlir::Value{};
2110 case X86::BI__builtin_ia32_fpclassps128_mask:
2111 case X86::BI__builtin_ia32_fpclassps256_mask:
2112 case X86::BI__builtin_ia32_fpclassps512_mask:
2113 case X86::BI__builtin_ia32_vfpclassbf16128_mask:
2114 case X86::BI__builtin_ia32_vfpclassbf16256_mask:
2115 case X86::BI__builtin_ia32_vfpclassbf16512_mask:
2116 case X86::BI__builtin_ia32_fpclassph128_mask:
2117 case X86::BI__builtin_ia32_fpclassph256_mask:
2118 case X86::BI__builtin_ia32_fpclassph512_mask:
2119 case X86::BI__builtin_ia32_fpclasspd128_mask:
2120 case X86::BI__builtin_ia32_fpclasspd256_mask:
2121 case X86::BI__builtin_ia32_fpclasspd512_mask:
2122 return emitX86Fpclass(builder, getLoc(expr->getExprLoc()), builtinID, ops);
2123 case X86::BI__builtin_ia32_vp2intersect_q_512:
2124 case X86::BI__builtin_ia32_vp2intersect_q_256:
2125 case X86::BI__builtin_ia32_vp2intersect_q_128:
2126 case X86::BI__builtin_ia32_vp2intersect_d_512:
2127 case X86::BI__builtin_ia32_vp2intersect_d_256:
2128 case X86::BI__builtin_ia32_vp2intersect_d_128: {
2129 unsigned numElts = cast<cir::VectorType>(ops[0].getType()).getSize();
2130 mlir::Location loc = getLoc(expr->getExprLoc());
2131 StringRef intrinsicName;
2132
2133 switch (builtinID) {
2134 default:
2135 llvm_unreachable("Unexpected builtin");
2136 case X86::BI__builtin_ia32_vp2intersect_q_512:
2137 intrinsicName = "x86.avx512.vp2intersect.q.512";
2138 break;
2139 case X86::BI__builtin_ia32_vp2intersect_q_256:
2140 intrinsicName = "x86.avx512.vp2intersect.q.256";
2141 break;
2142 case X86::BI__builtin_ia32_vp2intersect_q_128:
2143 intrinsicName = "x86.avx512.vp2intersect.q.128";
2144 break;
2145 case X86::BI__builtin_ia32_vp2intersect_d_512:
2146 intrinsicName = "x86.avx512.vp2intersect.d.512";
2147 break;
2148 case X86::BI__builtin_ia32_vp2intersect_d_256:
2149 intrinsicName = "x86.avx512.vp2intersect.d.256";
2150 break;
2151 case X86::BI__builtin_ia32_vp2intersect_d_128:
2152 intrinsicName = "x86.avx512.vp2intersect.d.128";
2153 break;
2154 }
2155
2156 auto resVector = cir::VectorType::get(builder.getBoolTy(), numElts);
2157
2158 cir::RecordType resRecord =
2159 cir::RecordType::get(&getMLIRContext(), {resVector, resVector}, false,
2160 false, cir::RecordType::RecordKind::Struct);
2161
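// The intrinsic yields a pair of vXi1 masks; each half is extracted below,
// widened back to a mask-register value, and stored through the pointer
// operands ops[2] and ops[3].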
2162 mlir::Value call = builder.emitIntrinsicCallOp(
2163 getLoc(expr->getExprLoc()), intrinsicName, resRecord,
2164 mlir::ValueRange{ops[0], ops[1]});
2165 mlir::Value result =
2166 cir::ExtractMemberOp::create(builder, loc, resVector, call, 0);
2167 result = emitX86MaskedCompareResult(builder, result, numElts, nullptr, loc);
2168 Address addr = Address(
2169 ops[2], clang::CharUnits::fromQuantity(std::max(1U, numElts / 8)));
2170 builder.createStore(loc, result, addr);
2171
2172 result = cir::ExtractMemberOp::create(builder, loc, resVector, call, 1);
2173 result = emitX86MaskedCompareResult(builder, result, numElts, nullptr, loc);
2174 addr = Address(ops[3],
2175 clang::CharUnits::fromQuantity(std::max(1U, numElts / 8)));
2176 builder.createStore(loc, result, addr);
2177 return mlir::Value{};
2178 }
2179 case X86::BI__builtin_ia32_vpmultishiftqb128:
2180 case X86::BI__builtin_ia32_vpmultishiftqb256:
2181 case X86::BI__builtin_ia32_vpmultishiftqb512:
2182 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
2183 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
2184 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
2185 case X86::BI__builtin_ia32_cmpeqps:
2186 case X86::BI__builtin_ia32_cmpeqpd:
2187 case X86::BI__builtin_ia32_cmpltps:
2188 case X86::BI__builtin_ia32_cmpltpd:
2189 case X86::BI__builtin_ia32_cmpleps:
2190 case X86::BI__builtin_ia32_cmplepd:
2191 case X86::BI__builtin_ia32_cmpunordps:
2192 case X86::BI__builtin_ia32_cmpunordpd:
2193 case X86::BI__builtin_ia32_cmpneqps:
2194 case X86::BI__builtin_ia32_cmpneqpd:
2195 cgm.errorNYI(expr->getSourceRange(),
2196 std::string("unimplemented X86 builtin call: ") +
2197 getContext().BuiltinInfo.getName(builtinID));
2198 return mlir::Value{};
2199 case X86::BI__builtin_ia32_cmpnltps:
2200 case X86::BI__builtin_ia32_cmpnltpd:
2201 return emitVectorFCmp(builder, ops, getLoc(expr->getExprLoc()),
2202 cir::CmpOpKind::lt, /*shouldInvert=*/true);
2203 case X86::BI__builtin_ia32_cmpnleps:
2204 case X86::BI__builtin_ia32_cmpnlepd:
2205 return emitVectorFCmp(builder, ops, getLoc(expr->getExprLoc()),
2206 cir::CmpOpKind::le, /*shouldInvert=*/true);
2207 case X86::BI__builtin_ia32_cmpordps:
2208 case X86::BI__builtin_ia32_cmpordpd:
2209 case X86::BI__builtin_ia32_cmpph128_mask:
2210 case X86::BI__builtin_ia32_cmpph256_mask:
2211 case X86::BI__builtin_ia32_cmpph512_mask:
2212 case X86::BI__builtin_ia32_cmpps128_mask:
2213 case X86::BI__builtin_ia32_cmpps256_mask:
2214 case X86::BI__builtin_ia32_cmpps512_mask:
2215 case X86::BI__builtin_ia32_cmppd128_mask:
2216 case X86::BI__builtin_ia32_cmppd256_mask:
2217 case X86::BI__builtin_ia32_cmppd512_mask:
2218 case X86::BI__builtin_ia32_vcmpbf16512_mask:
2219 case X86::BI__builtin_ia32_vcmpbf16256_mask:
2220 case X86::BI__builtin_ia32_vcmpbf16128_mask:
2221 case X86::BI__builtin_ia32_cmpps:
2222 case X86::BI__builtin_ia32_cmpps256:
2223 case X86::BI__builtin_ia32_cmppd:
2224 case X86::BI__builtin_ia32_cmppd256:
2225 case X86::BI__builtin_ia32_cmpeqss:
2226 case X86::BI__builtin_ia32_cmpltss:
2227 case X86::BI__builtin_ia32_cmpless:
2228 case X86::BI__builtin_ia32_cmpunordss:
2229 case X86::BI__builtin_ia32_cmpneqss:
2230 case X86::BI__builtin_ia32_cmpnltss:
2231 case X86::BI__builtin_ia32_cmpnless:
2232 case X86::BI__builtin_ia32_cmpordss:
2233 case X86::BI__builtin_ia32_cmpeqsd:
2234 case X86::BI__builtin_ia32_cmpltsd:
2235 case X86::BI__builtin_ia32_cmplesd:
2236 case X86::BI__builtin_ia32_cmpunordsd:
2237 case X86::BI__builtin_ia32_cmpneqsd:
2238 case X86::BI__builtin_ia32_cmpnltsd:
2239 case X86::BI__builtin_ia32_cmpnlesd:
2240 case X86::BI__builtin_ia32_cmpordsd:
2241 cgm.errorNYI(expr->getSourceRange(),
2242 std::string("unimplemented X86 builtin call: ") +
2243 getContext().BuiltinInfo.getName(builtinID));
2244 return mlir::Value{};
2245 case X86::BI__builtin_ia32_vcvtph2ps_mask:
2246 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
2247 case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
2248 mlir::Location loc = getLoc(expr->getExprLoc());
2249 return emitX86CvtF16ToFloatExpr(builder, loc, ops,
2250 convertType(expr->getType()));
2251 }
2252 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
2253 mlir::Location loc = getLoc(expr->getExprLoc());
2254 cir::VectorType resTy = cast<cir::VectorType>(convertType(expr->getType()));
2255
2256 cir::VectorType inputTy = cast<cir::VectorType>(ops[0].getType());
2257 unsigned numElts = inputTy.getSize();
2258
2259 mlir::Value mask = getMaskVecValue(builder, loc, ops[2], numElts);
2260
2261 llvm::SmallVector<mlir::Value, 3> args;
2262 args.push_back(ops[0]);
2263 args.push_back(ops[1]);
2264 args.push_back(mask);
2265
2266 return builder.emitIntrinsicCallOp(
2267 loc, "x86.avx512bf16.mask.cvtneps2bf16.128", resTy, args);
2268 }
2269 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
2270 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
2271 mlir::Location loc = getLoc(expr->getExprLoc());
2272 cir::VectorType resTy = cast<cir::VectorType>(convertType(expr->getType()));
2273 StringRef intrinsicName;
2274 if (builtinID == X86::BI__builtin_ia32_cvtneps2bf16_256_mask) {
2275 intrinsicName = "x86.avx512bf16.cvtneps2bf16.256";
2276 } else {
2277 assert(builtinID == X86::BI__builtin_ia32_cvtneps2bf16_512_mask);
2278 intrinsicName = "x86.avx512bf16.cvtneps2bf16.512";
2279 }
2280
2281 mlir::Value res = builder.emitIntrinsicCallOp(loc, intrinsicName, resTy,
2282 mlir::ValueRange{ops[0]});
2283
2284 return emitX86Select(builder, loc, ops[2], res, ops[1]);
2285 }
2286 case X86::BI__cpuid:
2287 case X86::BI__cpuidex: {
2288 mlir::Location loc = getLoc(expr->getExprLoc());
2289 mlir::Value subFuncId = builtinID == X86::BI__cpuidex
2290 ? ops[2]
2291 : builder.getConstInt(loc, sInt32Ty, 0);
2292 cir::CpuIdOp::create(builder, loc, /*cpuInfo=*/ops[0],
2293 /*functionId=*/ops[1], /*subFunctionId=*/subFuncId);
2294 return mlir::Value{};
2295 }
2296 case X86::BI__emul:
2297 case X86::BI__emulu:
2298 case X86::BI__mulh:
2299 case X86::BI__umulh:
2300 case X86::BI_mul128:
2301 case X86::BI_umul128: {
2302 cgm.errorNYI(expr->getSourceRange(),
2303 std::string("unimplemented X86 builtin call: ") +
2304 getContext().BuiltinInfo.getName(builtinID));
2305 return mlir::Value{};
2306 }
2307 case X86::BI__faststorefence: {
2308 cir::AtomicFenceOp::create(
2309 builder, getLoc(expr->getExprLoc()),
2310 cir::MemOrder::SequentiallyConsistent,
2311 cir::SyncScopeKindAttr::get(&getMLIRContext(),
2312 cir::SyncScopeKind::System));
2313 return mlir::Value{};
2314 }
2315 case X86::BI__shiftleft128:
2316 case X86::BI__shiftright128: {
2317 // Flip low/high ops and zero-extend amount to matching type.
2318 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
2319 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
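// Illustrative identity (not part of the original source): for 0 < Amt < 64,
// __shiftleft128(Low, High, Amt) == (High << Amt) | (Low >> (64 - Amt)),
// which is exactly fshl(High, Low, Amt); fshr gives the mirrored identity
// for __shiftright128.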
2320 std::swap(ops[0], ops[1]);
2321
2322 // Zero-extend shift amount to i64 if needed
2323 auto amtTy = mlir::cast<cir::IntType>(ops[2].getType());
2324 cir::IntType i64Ty = builder.getUInt64Ty();
2325
2326 if (amtTy != i64Ty)
2327 ops[2] = builder.createIntCast(ops[2], i64Ty);
2328
2329 const StringRef intrinsicName =
2330 (builtinID == X86::BI__shiftleft128) ? "fshl" : "fshr";
2331 return builder.emitIntrinsicCallOp(
2332 getLoc(expr->getExprLoc()), intrinsicName, i64Ty,
2333 mlir::ValueRange{ops[0], ops[1], ops[2]});
2334 }
2335 case X86::BI_ReadWriteBarrier:
2336 case X86::BI_ReadBarrier:
2337 case X86::BI_WriteBarrier: {
2338 cir::AtomicFenceOp::create(
2339 builder, getLoc(expr->getExprLoc()),
2340 cir::MemOrder::SequentiallyConsistent,
2341 cir::SyncScopeKindAttr::get(&getMLIRContext(),
2342 cir::SyncScopeKind::SingleThread));
2343 return mlir::Value{};
2344 }
2345 case X86::BI_AddressOfReturnAddress: {
2346 mlir::Location loc = getLoc(expr->getExprLoc());
2347 mlir::Value addr =
2348 cir::AddrOfReturnAddrOp::create(builder, loc, allocaInt8PtrTy);
2349 return builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy);
2350 }
2351 case X86::BI__stosb:
2352 case X86::BI__ud2:
2353 case X86::BI__int2c:
2354 case X86::BI__readfsbyte:
2355 case X86::BI__readfsword:
2356 case X86::BI__readfsdword:
2357 case X86::BI__readfsqword:
2358 case X86::BI__readgsbyte:
2359 case X86::BI__readgsword:
2360 case X86::BI__readgsdword:
2361 case X86::BI__readgsqword:
2362 case X86::BI__builtin_ia32_encodekey128_u32:
2363 case X86::BI__builtin_ia32_encodekey256_u32: {
2364 cgm.errorNYI(expr->getSourceRange(),
2365 std::string("unimplemented X86 builtin call: ") +
2366 getContext().BuiltinInfo.getName(builtinID));
2367 return mlir::Value{};
2368 }
2369 case X86::BI__builtin_ia32_aesenc128kl_u8:
2370 case X86::BI__builtin_ia32_aesdec128kl_u8:
2371 case X86::BI__builtin_ia32_aesenc256kl_u8:
2372 case X86::BI__builtin_ia32_aesdec256kl_u8: {
2373 llvm::StringRef intrinsicName;
2374 switch (builtinID) {
2375 default:
2376 llvm_unreachable("Unexpected builtin");
2377 case X86::BI__builtin_ia32_aesenc128kl_u8:
2378 intrinsicName = "x86.aesenc128kl";
2379 break;
2380 case X86::BI__builtin_ia32_aesdec128kl_u8:
2381 intrinsicName = "x86.aesdec128kl";
2382 break;
2383 case X86::BI__builtin_ia32_aesenc256kl_u8:
2384 intrinsicName = "x86.aesenc256kl";
2385 break;
2386 case X86::BI__builtin_ia32_aesdec256kl_u8:
2387 intrinsicName = "x86.aesdec256kl";
2388 break;
2389 }
2390
2391 return emitX86Aes(builder, getLoc(expr->getExprLoc()), intrinsicName,
2392 convertType(expr->getType()), ops);
2393 }
2394 case X86::BI__builtin_ia32_aesencwide128kl_u8:
2395 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
2396 case X86::BI__builtin_ia32_aesencwide256kl_u8:
2397 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
2398 llvm::StringRef intrinsicName;
2399 switch (builtinID) {
2400 default:
2401 llvm_unreachable("Unexpected builtin");
2402 case X86::BI__builtin_ia32_aesencwide128kl_u8:
2403 intrinsicName = "x86.aesencwide128kl";
2404 break;
2405 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
2406 intrinsicName = "x86.aesdecwide128kl";
2407 break;
2408 case X86::BI__builtin_ia32_aesencwide256kl_u8:
2409 intrinsicName = "x86.aesencwide256kl";
2410 break;
2411 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
2412 intrinsicName = "x86.aesdecwide256kl";
2413 break;
2414 }
2415
2416 return emitX86Aeswide(builder, getLoc(expr->getExprLoc()), intrinsicName,
2417 convertType(expr->getType()), ops);
2418 }
2419 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
2420 case X86::BI__builtin_ia32_vfmaddcph512_mask:
2421 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
2422 case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
2423 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
2424 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
2425 case X86::BI__builtin_ia32_prefetchi:
2426 cgm.errorNYI(expr->getSourceRange(),
2427 std::string("unimplemented X86 builtin call: ") +
2428 getContext().BuiltinInfo.getName(builtinID));
2429 return mlir::Value{};
2430 }
2431}