// emitAsmStores (excerpt): helper that converts each asm register result to
// the type its destination expects and stores it. The leading parameters are
// elided here; the call site at the end of emitAsmStmt below shows them.
                          const llvm::BitVector &resultTypeRequiresCast,
                          const llvm::BitVector &resultRegIsFlagReg) {
  // ...
  mlir::MLIRContext *ctx = builder.getContext();

  assert(regResults.size() == resultRegTypes.size());
  assert(regResults.size() == resultTruncRegTypes.size());
  assert(regResults.size() == resultRegDests.size());
  // resultRegDests may be longer than the other vectors, so these are <=.
  assert(resultTypeRequiresCast.size() <= resultRegDests.size());
  assert(resultRegIsFlagReg.size() <= resultRegDests.size());
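
  // Write each returned register value back to its destination l-value,
  // converting between the asm's result type and the destination type first.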
  for (unsigned i = 0, e = regResults.size(); i != e; ++i) {
    mlir::Value tmp = regResults[i];
    mlir::Type truncTy = resultTruncRegTypes[i];
    // ...
    if (i < resultRegIsFlagReg.size() && resultRegIsFlagReg[i]) {
      // ... (flag-register handling elided)
    }
    // If the asm result type doesn't match the destination's type, convert.
    if (resultRegTypes[i] != truncTy) {
      if (mlir::isa<cir::FPTypeInterface>(truncTy)) {
        // ... (floating-point path elided)
      }
      // The elided branches cast through unsigned integers of the computed
      // bit widths; the cast heads below are reconstructed (presumably
      // createIntCast, as in the tied-input path of emitAsmStmt).
      tmp = builder.createIntCast(
          tmp, cir::IntType::get(ctx, (unsigned)resSize, false));
      // ...
      tmp = builder.createIntCast(
          tmp, cir::IntType::get(ctx, (unsigned)tmpSize, false));
    }
    LValue dest = resultRegDests[i];
    // ...
    // When the output required a cast, reinterpret the destination with the
    // register result's type before storing; otherwise diagnose.
    if ((i < resultTypeRequiresCast.size()) && resultTypeRequiresCast[i]) {
      // ...
      /* ... = */ dest.getAddress().withElementType(builder, resultRegTypes[i]);
      // ...
      const Expr *outExpr = s.getOutputExpr(i);
      // ... (diagnostic head reconstructed from the parallel reports below)
      cgm.getDiags().Report(outExpr->getExprLoc(),
                            diag::err_store_value_to_reg);
    }
    // ... (the store itself is elided)
  }
}
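
// emitAsmStmt (excerpt): lowers a GCC-style inline asm statement to a
// cir::InlineAsmOp. The header below is reconstructed from the return
// statement and the uses of 's' in this excerpt; the original may differ.
mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &s) {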
  // Assemble the final asm string.
  std::string asmString = s.generateAsmString(getContext());
  // ...
  SourceLocation srcLoc = s.getAsmLoc(); // (reconstructed; srcLoc is used below)
  mlir::Location loc = getLoc(srcLoc);
  // ... (constraint-info collection elided; its call tail reads
  //      "..., inputConstraintInfos);")
  bool isGCCAsmGoto = false;
  // ...
  std::string constraints;
  // ... (result bookkeeping declarations elided)
  llvm::BitVector resultTypeRequiresCast;
  llvm::BitVector resultRegIsFlagReg;
  // ...
  std::string inOutConstraints;
  // ...
  llvm::SmallSet<std::string, 8> physRegOutputs;
  // ...
  bool readOnly = true, readNone = true;
  // ...
  std::string outputConstraint;
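
  // First pass: walk the output operands. Each constraint is simplified and
  // specialized for any register variable it names; register outputs are
  // returned by-value from the asm op, memory outputs are passed by-address.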
  for (unsigned i = 0, e = s.getNumOutputs(); i != e; ++i) {
    // ...
    // Simplify the output constraint.
    outputConstraint = s.getOutputConstraint(i);
    // ... (simplification call elided; it operates on
    //      StringRef(outputConstraint).drop_front(), i.e. past the '=')
    const Expr *outExpr = s.getOutputExpr(i);
    // ...
    outputConstraint = s.addVariableConstraints(
        // ...
        [&](const Stmt *unspStmt, StringRef msg) {
          cgm.errorUnsupported(unspStmt, msg);
        });
    // ...
    // Reject two outputs bound to the same physical register.
    if (!gccReg.empty() && !physRegOutputs.insert(gccReg).second)
      cgm.error(srcLoc, "multiple outputs to hard register: " + gccReg);
    // ...
    outputConstraints.push_back(outputConstraint);
    if (!constraints.empty())
      constraints += ','; // (reconstructed: the constraint string is
                          //  comma-separated)
    // ...
    const bool isScalarOrAggregate = /* ... */;
    // ... (register outputs are returned by-value:)
    constraints += "=" + outputConstraint;
    resultRegQualTys.push_back(qty);
    resultRegDests.push_back(dest);
    // ...
    // Outputs constrained to condition-code flags are tracked so that
    // emitAsmStores can treat them specially.
    bool isFlagReg = llvm::StringRef(outputConstraint).starts_with("{@cc");
    resultRegIsFlagReg.push_back(isFlagReg);
    // ...
    const bool requiresCast =
        // ...
        (cgm.getTargetCIRGenInfo().isScalarizableAsmOperand(*this, ty) ||
         /* ... */);
    resultTruncRegTypes.push_back(ty);
    resultTypeRequiresCast.push_back(requiresCast);
    // ...
      cgm.error(outExpr->getExprLoc(), "output size should not be zero");
    // ...
    resultRegTypes.push_back(ty);
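
    // If this output is tied to an input and the input type is wider, the
    // asm result type must be widened to match; find the tied input first.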
    // ... (when this output has a matching tied input:)
    for (inputNo = 0; inputNo != s.getNumInputs(); ++inputNo) {
      // ... (stop at the input whose tied operand is this output)
    }
    assert(inputNo != s.getNumInputs() && "Didn't find matching input!");
    // ...
    QualType inputTy = s.getInputExpr(inputNo)->getType();
    // ...
    if (getContext().getTypeSize(outputType) < inputSize) {
      // ... (widen resultRegTypes.back() to the input type)
    }
    // ...
    // Let the target adjust the result type, or diagnose if it cannot.
    if (mlir::Type adjTy = cgm.getTargetCIRGenInfo().adjustInlineAsmType(
            *this, outputConstraint, resultRegTypes.back()))
      resultRegTypes.back() = adjTy;
    else // ('else' reconstructed; parallels the input path below)
      cgm.getDiags().Report(srcLoc, diag::err_asm_invalid_type_in_input)
          << outExpr->getType() << outputConstraint;
    // ... (memory outputs: pass the destination address by-reference)
    argTypes.push_back(destAddr.getType());
    // ...
    constraints += outputConstraint;
    readOnly = readNone = false;
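
    // Read-write ("+") outputs also act as inputs: record the tied value and
    // its constraint in the inOut* vectors, which are appended after the
    // plain inputs below.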
    if (info.isReadWrite()) { // (head reconstructed)
      inOutConstraints += ',';
      // ...
      const Expr *inputExpr = s.getOutputExpr(i);
      // ...
      auto [argValue, argElementType] = /* ... */;
      // ... (call head reconstructed from the parallel uses elsewhere)
      if (mlir::Type adjTy = cgm.getTargetCIRGenInfo().adjustInlineAsmType(
              *this, outputConstraint, argValue.getType()))
        argValue = builder.createBitcast(argValue, adjTy);
      // ... (tie by operand number when possible:)
        inOutConstraints += llvm::utostr(i);
      // ... (otherwise repeat the constraint itself:)
        inOutConstraints += outputConstraint;
      // ...
      inOutArgTypes.push_back(argValue.getType());
      inOutArgElemTypes.push_back(argElementType);
      inOutArgs.push_back(argValue);
    }
  }
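
  // Second pass: the plain input operands.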
  for (unsigned i = 0, e = s.getNumInputs(); i != e; ++i) {
    const Expr *inputExpr = s.getInputExpr(i);
    // ...
    if (!constraints.empty())
      constraints += ','; // (reconstructed, as in the output loop)
    // ...
    // Simplify the input constraint.
    std::string inputConstraint(s.getInputConstraint(i));
    // ...
    inputConstraint = s.addVariableConstraints(
        // ...
        [&](const Stmt *unspStmt, StringRef msg) {
          cgm.errorUnsupported(unspStmt, msg);
        });
    // ...
    std::string replaceConstraint(inputConstraint);
    // ...
    auto [argValue, argElemType] = emitAsmInput(info, inputExpr, constraints);
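
    // If this input is tied to a wider output, extend the input value to the
    // output's size; the backend expects the widened type on the tied input.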
    if (info.hasTiedOperand()) { // (head and 'output' index reconstructed)
      unsigned output = info.getTiedOperand();
      QualType outputType = s.getOutputExpr(output)->getType();
      // ... (only when the output is wider; pointers go through an integer:)
        argValue = builder.createPtrToInt(argValue, uIntPtrTy);
      // ... (then extend to the output type, by kind:)
        argValue = builder.createIntCast(argValue, outputTy);
      // ...
        argValue = builder.createIntCast(argValue, uIntPtrTy);
      // ...
        argValue = builder.createFloatingCast(argValue, outputTy);
      // ...
      // Use the tied output's constraint when adjusting the type below.
      replaceConstraint = outputConstraints[output];
    }
    // ... (call head reconstructed, as in the output path above)
    if (mlir::Type adjTy = cgm.getTargetCIRGenInfo().adjustInlineAsmType(
            *this, replaceConstraint, argValue.getType()))
      argValue = builder.createBitcast(argValue, adjTy);
    else // ('else' reconstructed)
      cgm.getDiags().Report(s.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << inputExpr->getType() << inputConstraint;
    // ...
    argTypes.push_back(argValue.getType());
    argElemTypes.push_back(argElemType);
    inArgs.push_back(argValue);
    args.push_back(argValue);
    constraints += inputConstraint;
  }
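
  // Append the in-out operands (the input half of read-write constraints)
  // after the plain inputs, and splice their constraints onto the string.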
  for (unsigned i = 0, e = inOutArgs.size(); i != e; ++i) {
    args.push_back(inOutArgs[i]);
    argTypes.push_back(inOutArgTypes[i]);
    argElemTypes.push_back(inOutArgElemTypes[i]);
  }
  constraints += inOutConstraints;
  bool hasUnwindClobber = false;
  collectClobbers(*this, s, constraints, hasUnwindClobber, readOnly, readNone);
  // ...
  std::array<mlir::ValueRange, 3> operands = {outArgs, inArgs, inOutArgs};
  // A single register result is returned directly; multiple results are
  // packed into an anonymous record type.
  mlir::Type resultType;
  if (resultRegTypes.size() == 1)
    resultType = resultRegTypes[0];
  else if (resultRegTypes.size() > 1)
    resultType = builder.getAnonRecordTy(resultRegTypes, false,
                                         /* ... */);

  bool hasSideEffect = s.isVolatile() || s.getNumOutputs() == 0;
  // ...
  std::vector<mlir::Value> regResults;
  cir::InlineAsmOp ia = cir::InlineAsmOp::create(
      builder, getLoc(s.getAsmLoc()), resultType, operands, asmString,
      /* ... */);
  if (isGCCAsmGoto) { // (branch heads reconstructed around the elided bodies)
    // ...
  } else if (hasUnwindClobber) {
    // ...
  } else {
    // ...
    mlir::Value result; // (reconstructed; assigned and used below)
    if (ia.getNumResults())
      result = ia.getResult(0);
    // ...
    llvm::SmallVector<mlir::Attribute> operandAttrs; // (reconstructed)
    // Operands that carry an element type must be pointers; every other
    // operand gets a null attribute so attributes line up with operands by
    // index during lowering to LLVM IR.
    for (auto typ : argElemTypes) {
      // ...
        assert(mlir::isa<cir::PointerType>(op.getType()) &&
               "pointer type expected");
        assert(/* ... */ &&
               "element type differs from pointee type!");
      // ...
        operandAttrs.push_back(mlir::Attribute());
      // ...
    }

    assert(args.size() == operandAttrs.size() &&
           "The number of attributes does not match the number of operands");

    ia.setOperandAttrsAttr(builder.getArrayAttr(operandAttrs));
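
    // Unpack the op's results into regResults: a single register result is
    // used directly; multiple results come back as the anonymous record,
    // which is spilled to a temporary and reloaded field by field.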
    if (resultRegTypes.size() == 1) {
      regResults.push_back(result);
    } else if (resultRegTypes.size() > 1) {
      // ...
      auto addr = emitAlloca("__asm_result", resultType, loc, alignment,
                             false); // (assignment target reconstructed)
      // ...
      builder.createStore(loc, result, addr);
      // ...
      for (unsigned i = 0, e = resultRegTypes.size(); i != e; ++i) {
        cir::PointerType typ = builder.getPointerTo(resultRegTypes[i]);
        cir::GetMemberOp ptr = builder.createGetMember(loc, typ, dest, "", i);
        cir::LoadOp tmp = builder.createLoad(loc, Address(ptr, alignment));
        regResults.push_back(tmp);
      }
    }
  }
  emitAsmStores(*this, s, regResults, resultRegTypes, resultTruncRegTypes,
                resultRegDests, resultRegQualTys, resultTypeRequiresCast,
                resultRegIsFlagReg); // (final argument reconstructed from the
                                     //  callee's parameter list above)
  // ...
  return mlir::success();
}
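
// Worked example (illustrative; the snippet and its lowering are ours, not
// taken from the source): for
//   int out; int in = 42;
//   __asm__ volatile("mov %1, %0" : "=r"(out) : "r"(in));
// the two passes above produce the constraint string "=r,r", one by-value
// register result (so resultType is that single register type), no in-out
// operands, and hasSideEffect = true because the statement is volatile.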