#include "TargetInfo.h"
#include "llvm/Support/AMDGPUAddrSpace.h"

using namespace clang;
using namespace clang::CodeGen;
class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;
  unsigned numRegsForType(QualType Ty) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;
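  // Coerce HIP scalar pointer arguments from generic pointers to global ones.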
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Single value types.
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);

    return Ty;
  }
public:
  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;
  llvm::FixedVectorType *
  getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                             const LangOptions &Opt) const override {
    // 96-bit loads and stores are legal on AMDGPU, so a 3 x 32-bit vector
    // can be kept as-is for memory accesses instead of being widened.
    if (T->getNumElements() == 3 && getDataLayout().getTypeSizeInBits(T) == 96)
      return T;
    return DefaultABIInfo::getOptimalVectorMemoryType(T, Opt);
  }
};
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}
bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
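/// Estimate the number of 32-bit registers a value of type \p Ty occupies
/// when passed in registers; used to enforce the MaxNumRegsForArgsRet budget.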
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed, two elements per
    // 32-bit register.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    for (const FieldDecl *Field : RT->getDecl()->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }
    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
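// Classify the return type and every argument. Kernel arguments get the
// dedicated kernel classification; all other arguments share a budget of
// MaxNumRegsForArgsRet registers, tracked through NumRegsLeft.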
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}
RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy) && !getRecordArgABI(RetTy, getCXXABI())) {
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just return a regular value.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    // Pack aggregates <= 8 bytes into a single VGPR or a pair.
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    if (Size <= 64) {
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
      return ABIArgInfo::getDirect();
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}
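/// For kernels all parameters are really passed in a special buffer. It
/// doesn't make sense to pass anything byval, so everything must be direct.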
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // If the argument was not coerced, pass aggregates byref in the readonly
  // constant address space.
  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  // Pass the argument directly; CanBeFlattened is false so CodeGen does not
  // expand the struct into its individual elements.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
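// For example, under HIP a scalar `int *` kernel parameter (generic address
// space in the AST) becomes a `ptr addrspace(1)` kernel argument, while an
// uncoerced aggregate is passed byref in the constant address space.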
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Variadic arguments are always passed directly and never flattened.
  if (Variadic) {
    return ABIArgInfo::getDirect(/*T=*/nullptr,
                                 /*Offset=*/0,
                                 /*Padding=*/nullptr,
                                 /*CanBeFlattened=*/false,
                                 /*Align=*/0);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Pack aggregates <= 8 bytes into a single VGPR or a pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }

    // Use pass-by-reference instead of pass-by-value for struct arguments.
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}

  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &M) const;

  void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const override;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
  bool shouldEmitStaticExternCAliases() const override;
  bool shouldEmitDWARFBitFieldSeparators() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<OpenCLKernelAttr>() ||
          (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
          (isa<VarDecl>(D) &&
           (D->hasAttr<HIPManagedAttr>() ||
            D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}
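// Emit the amdgpu-* function attributes that encode kernel launch bounds and
// resource limits from the corresponding source-level attributes.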
void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value for HIP.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    // Y and Z dimensions default to 1 if not specified.
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }
}
void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
    CodeGen::CodeGenModule &CGM) const {
  StringRef Name = "__oclc_ABI_version";
  llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
  if (OriginalGV && !llvm::GlobalVariable::isExternalLinkage(
                        OriginalGV->getLinkage()))
    return;

  if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
      llvm::CodeObjectVersionKind::COV_None)
    return;

  auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
  llvm::Constant *COV = llvm::ConstantInt::get(
      Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);

  // It needs to be constant weak_odr without externally_initialized so that
  // the load instruction can be eliminated by IPSCCP.
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
      nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
  GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);

  // Replace any external references to this variable with the new global.
  if (OriginalGV) {
    OriginalGV->replaceAllUsesWith(GV);
    GV->takeName(OriginalGV);
    OriginalGV->eraseFromParent();
  }
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
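// On AMDGPU the null pointer in the local address space is not the zero bit
// pattern, so it is materialized as an addrspacecast of the generic-address-
// space null rather than a plain ConstantPointerNull.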
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  auto &Ctx = CGM.getContext();
  if (PT->getAddressSpace() != Ctx.getTargetAddressSpace(LangAS::opencl_local))
    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to the constant address space if the variable has constant
  // initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}
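// Map a Clang SyncScope onto the target's named LLVM sync scope. For example,
// a HIP workgroup-scope atomic lowers to syncscope("workgroup"), and relaxed
// OpenCL scopes get a "one-as" suffix (e.g. "workgroup-one-as") because
// OpenCL atomic scopes are per-address-space unless sequentially consistent.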
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  }

  // OpenCL assumes by default that atomic scopes are per-address-space for
  // non-sequentially-consistent operations.
  if (Scope >= SyncScope::OpenCLWorkGroup &&
      Scope <= SyncScope::OpenCLSubGroup &&
      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
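// Attach AMDGPU-specific metadata to atomic instructions: a
// !noalias.addrspace range excluding private memory when the language
// guarantees thread-private atomics are undefined, and the unsafe-FP-atomics
// hints when the user opted in.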
void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  // OpenCL and old-style HIP atomics consider atomics targeting thread
  // private memory to be undefined.
  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
      AE && AE->threadPrivateMemoryAtomicsAreUndefined()) {
    llvm::MDBuilder MDHelper(CGF.getLLVMContext());
    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
  }

  if (!RMW || !CGF.getTarget().allowAMDGPUUnsafeFPAtomics())
    return;

  llvm::AtomicRMWInst::BinOp RMWOp = RMW->getOperation();
  if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
    llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);

    if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW->getType()->isFloatTy())
      RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
  }
}
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}
void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}
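/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.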
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  // The first argument is the block literal itself.
  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  // The remaining arguments are the block's captured local pointers.
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  KernelAttrs.addAttribute("enqueued-block");
  F->addFnAttrs(KernelAttrs);

  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
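/// Emit the IR encoding to attach the AMD GPU flat-work-group-size attribute
/// to \p F, optionally reporting the resolved bounds through MinThreadsVal
/// and MaxThreadsVal.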
void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  if (FlatWGS) {
    Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
    Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}