-
Notifications
You must be signed in to change notification settings - Fork 14.7k
[CodeGen] Provide original IR type to CC lowering (NFC) #152709
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Conversation
It is common to have ABI requirements for illegal types: For example, two i64 argument parts that originally came from an fp128 argument may have a different call ABI than ones that came from an i128 argument. The current calling convention lowering does not provide access to this information, so backends come up with various hacks to support it (like additional pre-analysis cached in CCState, or bypassing the default logic entirely). This PR adds the original IR type to InputArg/OutputArg and passes it down to CCAssignFn. It is not actually used anywhere yet, this just does the mechanical changes to thread through the new argument.
@llvm/pr-subscribers-backend-x86 @llvm/pr-subscribers-backend-powerpc Author: Nikita Popov (nikic) Changes: It is common to have ABI requirements for illegal types: For example, two i64 argument parts that originally came from an fp128 argument may have a different call ABI than ones that came from an i128 argument. The current calling convention lowering does not provide access to this information, so backends come up with various hacks to support it (like additional pre-analysis cached in CCState, or bypassing the default logic entirely). This PR adds the original IR type to InputArg/OutputArg and passes it down to CCAssignFn. It is not actually used anywhere yet, this just does the mechanical changes to thread through the new argument. Patch is 45.47 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/152709.diff 28 Files Affected:
diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
index a114a0c40a083..c7281ff1a99e6 100644
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -154,9 +154,9 @@ struct ForwardedRegister {
/// CCAssignFn - This function assigns a location for Val, updating State to
/// reflect the change. It returns 'true' if it failed to handle Val.
-typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
- MVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT,
+ CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+ Type *OrigTy, CCState &State);
/// CCCustomFn - This function assigns a location for Val, possibly updating
/// all args to reflect changes and indicates if it handled it. It must set
@@ -290,6 +290,7 @@ class CCState {
/// and argument flags.
LLVM_ABI void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ SmallVectorImpl<Type *> &OrigTys,
CCAssignFn Fn);
/// The function will invoke AnalyzeCallOperands.
@@ -310,7 +311,7 @@ class CCState {
/// AnalyzeCallResult - Same as above except it's specialized for calls which
/// produce a single value.
- LLVM_ABI void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+ LLVM_ABI void AnalyzeCallResult(MVT VT, Type *OrigTy, CCAssignFn Fn);
/// getFirstUnallocated - Return the index of the first unallocated register
/// in the set, or Regs.size() if they are all allocated.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 8d98255f03f8a..a8bde824527a5 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -198,7 +198,7 @@ class LLVM_ABI CallLowering {
CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
ISD::ArgFlagsTy Flags, CCState &State) {
if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
- State))
+ Info.Ty, State))
return true;
StackSize = State.getStackSize();
return false;
diff --git a/llvm/include/llvm/CodeGen/TargetCallingConv.h b/llvm/include/llvm/CodeGen/TargetCallingConv.h
index ca76c04767301..6e80117592d5c 100644
--- a/llvm/include/llvm/CodeGen/TargetCallingConv.h
+++ b/llvm/include/llvm/CodeGen/TargetCallingConv.h
@@ -205,6 +205,7 @@ namespace ISD {
ArgFlagsTy Flags;
MVT VT = MVT::Other;
EVT ArgVT;
+ Type *OrigTy = nullptr;
bool Used = false;
/// Index original Function's argument.
@@ -218,9 +219,10 @@ namespace ISD {
unsigned PartOffset;
InputArg() = default;
- InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
+ InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, Type *OrigTy, bool used,
unsigned origIdx, unsigned partOffs)
- : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
+ : Flags(flags), OrigTy(OrigTy), Used(used), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
VT = vt.getSimpleVT();
ArgVT = argvt;
}
@@ -243,6 +245,7 @@ namespace ISD {
ArgFlagsTy Flags;
MVT VT;
EVT ArgVT;
+ Type *OrigTy = nullptr;
/// Index original Function's argument.
unsigned OrigArgIndex;
@@ -253,9 +256,10 @@ namespace ISD {
unsigned PartOffset;
OutputArg() = default;
- OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, unsigned origIdx,
- unsigned partOffs)
- : Flags(flags), OrigArgIndex(origIdx), PartOffset(partOffs) {
+ OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, Type *OrigTy,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), OrigTy(OrigTy), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
VT = vt;
ArgVT = argvt;
}
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index b71e7812296cc..df3433199681b 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -89,7 +89,7 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Ins[i].VT;
ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Ins[i].OrigTy, *this))
report_fatal_error("unable to allocate function argument #" + Twine(i));
}
}
@@ -102,7 +102,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
return false;
}
return true;
@@ -116,7 +116,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
report_fatal_error("unable to allocate function return #" + Twine(i));
}
}
@@ -129,7 +129,8 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy,
+ *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
<< ArgVT << '\n';
@@ -142,12 +143,13 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ SmallVectorImpl<Type *> &OrigTys,
CCAssignFn Fn) {
unsigned NumOps = ArgVTs.size();
for (unsigned i = 0; i != NumOps; ++i) {
MVT ArgVT = ArgVTs[i];
ISD::ArgFlagsTy ArgFlags = Flags[i];
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, OrigTys[i], *this)) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "
<< ArgVT << '\n';
@@ -164,7 +166,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
MVT VT = Ins[i].VT;
ISD::ArgFlagsTy Flags = Ins[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
+ if (Fn(i, VT, VT, CCValAssign::Full, Flags, Ins[i].OrigTy, *this)) {
#ifndef NDEBUG
dbgs() << "Call result #" << i << " has unhandled type "
<< VT << '\n';
@@ -175,8 +177,8 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
}
/// Same as above except it's specialized for calls that produce a single value.
-void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
- if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
+void CCState::AnalyzeCallResult(MVT VT, Type *OrigTy, CCAssignFn Fn) {
+ if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), OrigTy, *this)) {
#ifndef NDEBUG
dbgs() << "Call result has unhandled type "
<< VT << '\n';
@@ -213,7 +215,8 @@ void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCRegister> &Regs,
// location in memory.
bool HaveRegParm;
do {
- if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
+ Type *OrigTy = EVT(VT).getTypeForEVT(Context);
+ if (Fn(0, VT, VT, CCValAssign::Full, Flags, OrigTy, *this)) {
#ifndef NDEBUG
dbgs() << "Call has unhandled type " << VT
<< " while computing remaining regparms\n";
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 0f3ec8b10b02e..90a18b86c1b1f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -1099,7 +1099,7 @@ bool CallLowering::checkReturn(CCState &CCInfo,
CCAssignFn *Fn) const {
for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
MVT VT = MVT::getVT(Outs[I].Ty);
- if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
+ if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], Outs[I].Ty, CCInfo))
return false;
}
return true;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 0d1e95450a6b7..476fe18068430 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2273,8 +2273,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
Flags.setNoExt();
for (unsigned i = 0; i < NumParts; ++i) {
- Outs.push_back(ISD::OutputArg(
- Flags, Parts[i].getValueType().getSimpleVT(), VT, 0, 0));
+ Outs.push_back(ISD::OutputArg(Flags,
+ Parts[i].getValueType().getSimpleVT(),
+ VT, I.getOperand(0)->getType(), 0, 0));
OutVals.push_back(Parts[i]);
}
}
@@ -2292,6 +2293,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
Flags.setSwiftError();
Outs.push_back(ISD::OutputArg(Flags, /*vt=*/TLI.getPointerTy(DL),
/*argvt=*/EVT(TLI.getPointerTy(DL)),
+ PointerType::getUnqual(*DAG.getContext()),
/*origidx=*/1, /*partOffs=*/0));
// Create SDNode for the swifterror virtual register.
OutVals.push_back(
@@ -11247,7 +11249,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
// For scalable vectors the scalable part is currently handled
// by individual targets, so we just use the known minimum size here.
ISD::OutputArg MyFlags(
- Flags, Parts[j].getValueType().getSimpleVT(), VT, i,
+ Flags, Parts[j].getValueType().getSimpleVT(), VT, Args[i].Ty, i,
j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
@@ -11625,7 +11627,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
ISD::ArgFlagsTy Flags;
Flags.setSRet();
MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
- ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, true,
+ ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, F.getReturnType(), true,
ISD::InputArg::NoArgIndex, 0);
Ins.push_back(RetArg);
}
@@ -11763,7 +11765,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// are responsible for handling scalable vector arguments and
// return values.
ISD::InputArg MyFlags(
- Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
+ Flags, RegisterVT, VT, Arg.getType(), isArgValueUsed, ArgNo,
PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
if (NumRegs > 1 && i == 0)
MyFlags.Flags.setSplit();
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index bf4c9f91d1c97..21c1ef7768727 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1772,7 +1772,7 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i)
- Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, 0, 0));
+ Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, ReturnType, 0, 0));
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 787a1a83613c9..cc46159915d76 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -75,8 +75,10 @@ static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
auto &It = PendingMembers[0];
CCAssignFn *AssignFn =
TLI->CCAssignFnForCall(State.getCallingConv(), /*IsVarArg=*/false);
+ // FIXME: Get the correct original type.
+ Type *OrigTy = EVT(It.getValVT()).getTypeForEVT(State.getContext());
if (AssignFn(It.getValNo(), It.getValVT(), It.getValVT(), CCValAssign::Full,
- ArgFlags, State))
+ ArgFlags, OrigTy, State))
llvm_unreachable("Call operand has unhandled type");
// Return the flags to how they were before.
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.h b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
index 63185a97cba03..7105fa695334b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.h
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
@@ -18,52 +18,63 @@
namespace llvm {
bool CC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State);
+ Type *OrigTy, CCState &State);
bool CC_AArch64_Arm64EC_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_Arm64EC_Thunk(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_Arm64EC_Thunk_Native(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_DarwinPCS_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_DarwinPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_DarwinPCS_ILP32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
- CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_Win64PCS(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State);
+ Type *OrigTy, CCState &State);
bool CC_AArch64_Win64_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_Win64_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_Arm64EC_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool CC_AArch64_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State);
+ Type *OrigTy, CCState &State);
bool CC_AArch64_Preserve_None(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool RetCC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
- CCState &State);
+ Type *OrigTy, CCState &State);
bool RetCC_AArch64_Arm64EC_Thunk(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+ CCState &State);
bool RetCC_AArch64_Arm64EC_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags,
+ ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
CCState &State);
} // namespace llvm
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9d74bb5a8661d..41ff169891e87 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -267,7 +267,7 @@ class AArch64FastISel final : public FastISel {
private:
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
- unsigned &NumBytes);
+ SmallVectorImpl<Type *> &OrigTys, unsigned &NumBytes);
bool finishCall(CallLoweringInfo &CLI, unsigned NumBytes);
public:
@@ -3011,11 +3011,13 @@ bool AArch64FastISel::fastLowerArguments() {
bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
SmallVectorImpl<MVT> &OutVTs,
+ SmallVectorImpl<Type *> &OrigTys,
unsigned &NumBytes) {
CallingConv::ID CC = CLI.CallConv;
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
- CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
+ CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, OrigTys,
+ CCAssignFnForCall(CC));
// Get a count of how many bytes are to be pushed on the stack.
NumBytes = CCInfo.getStackSize();
@@ -3194,6 +3196,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
// Set up the argument vectors.
SmallVector<MVT, 16> OutVTs;
+ ...
[truncated]
|
It is common to have ABI requirements for illegal types: For example, two i64 argument parts that originally came from an fp128 argument may have a different call ABI than ones that came from an i128 argument.
The current calling convention lowering does not provide access to this information, so backends come up with various hacks to support it (like additional pre-analysis cached in CCState, or bypassing the default logic entirely).
This PR adds the original IR type to InputArg/OutputArg and passes it down to CCAssignFn. It is not actually used anywhere yet, this just does the mechanical changes to thread through the new argument.