diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
index a114a0c40a083..c7281ff1a99e6 100644
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -154,9 +154,9 @@ struct ForwardedRegister {

 /// CCAssignFn - This function assigns a location for Val, updating State to
 /// reflect the change. It returns 'true' if it failed to handle Val.
-typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
-                        MVT LocVT, CCValAssign::LocInfo LocInfo,
-                        ISD::ArgFlagsTy ArgFlags, CCState &State);
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT,
+                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                        Type *OrigTy, CCState &State);

 /// CCCustomFn - This function assigns a location for Val, possibly updating
 /// all args to reflect changes and indicates if it handled it. It must set
@@ -290,6 +290,7 @@ class CCState {
   /// and argument flags.
   LLVM_ABI void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                     SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+                                    SmallVectorImpl<Type *> &OrigTys,
                                     CCAssignFn Fn);

   /// The function will invoke AnalyzeCallOperands.
@@ -310,7 +311,7 @@ class CCState {

   /// AnalyzeCallResult - Same as above except it's specialized for calls which
   /// produce a single value.
-  LLVM_ABI void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+  LLVM_ABI void AnalyzeCallResult(MVT VT, Type *OrigTy, CCAssignFn Fn);

   /// getFirstUnallocated - Return the index of the first unallocated register
   /// in the set, or Regs.size() if they are all allocated.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 8d98255f03f8a..a8bde824527a5 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -198,7 +198,7 @@ class LLVM_ABI CallLowering {
                            CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
                            ISD::ArgFlagsTy Flags, CCState &State) {
       if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
-                                        State))
+                                        Info.Ty, State))
         return true;
       StackSize = State.getStackSize();
       return false;
     }
diff --git a/llvm/include/llvm/CodeGen/TargetCallingConv.h b/llvm/include/llvm/CodeGen/TargetCallingConv.h
index ca76c04767301..6e80117592d5c 100644
--- a/llvm/include/llvm/CodeGen/TargetCallingConv.h
+++ b/llvm/include/llvm/CodeGen/TargetCallingConv.h
@@ -205,6 +205,7 @@ namespace ISD {
     ArgFlagsTy Flags;
     MVT VT = MVT::Other;
     EVT ArgVT;
+    Type *OrigTy = nullptr;
     bool Used = false;

     /// Index original Function's argument.
@@ -218,9 +219,10 @@ namespace ISD {
     unsigned PartOffset;

     InputArg() = default;
-    InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
+    InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, Type *OrigTy, bool used,
              unsigned origIdx, unsigned partOffs)
-      : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
+        : Flags(flags), OrigTy(OrigTy), Used(used), OrigArgIndex(origIdx),
+          PartOffset(partOffs) {
       VT = vt.getSimpleVT();
       ArgVT = argvt;
     }
@@ -243,6 +245,7 @@ namespace ISD {
     ArgFlagsTy Flags;
     MVT VT;
     EVT ArgVT;
+    Type *OrigTy = nullptr;

     /// Index original Function's argument.
     unsigned OrigArgIndex;

@@ -253,9 +256,10 @@ namespace ISD {
     unsigned PartOffset;

     OutputArg() = default;
-    OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, unsigned origIdx,
-              unsigned partOffs)
-      : Flags(flags), OrigArgIndex(origIdx), PartOffset(partOffs) {
+    OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, Type *OrigTy,
+              unsigned origIdx, unsigned partOffs)
+        : Flags(flags), OrigTy(OrigTy), OrigArgIndex(origIdx),
+          PartOffset(partOffs) {
       VT = vt;
       ArgVT = argvt;
     }
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
index b71e7812296cc..df3433199681b 100644
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -89,7 +89,7 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ArgVT = Ins[i].VT;
     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
-    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
+    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Ins[i].OrigTy, *this))
       report_fatal_error("unable to allocate function argument #" + Twine(i));
   }
 }
@@ -102,7 +102,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
-    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
+    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
       return false;
   }
   return true;
@@ -116,7 +116,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
-    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
+    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy, *this))
       report_fatal_error("unable to allocate function return #" + Twine(i));
   }
 }
@@ -129,7 +129,8 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
   for (unsigned i = 0; i != NumOps; ++i) {
     MVT ArgVT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
-    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Outs[i].OrigTy,
+           *this)) {
 #ifndef NDEBUG
       dbgs() << "Call operand #" << i << " has unhandled type "
              << ArgVT << '\n';
@@ -142,12 +143,13 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
 /// Same as above except it takes vectors of types and argument flags.
 void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                   SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+                                  SmallVectorImpl<Type *> &OrigTys,
                                   CCAssignFn Fn) {
   unsigned NumOps = ArgVTs.size();
   for (unsigned i = 0; i != NumOps; ++i) {
     MVT ArgVT = ArgVTs[i];
     ISD::ArgFlagsTy ArgFlags = Flags[i];
-    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, OrigTys[i], *this)) {
 #ifndef NDEBUG
       dbgs() << "Call operand #" << i << " has unhandled type "
              << ArgVT << '\n';
@@ -164,7 +166,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
     MVT VT = Ins[i].VT;
     ISD::ArgFlagsTy Flags = Ins[i].Flags;
-    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
+    if (Fn(i, VT, VT, CCValAssign::Full, Flags, Ins[i].OrigTy, *this)) {
 #ifndef NDEBUG
       dbgs() << "Call result #" << i << " has unhandled type "
              << VT << '\n';
@@ -175,8 +177,8 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
 }

 /// Same as above except it's specialized for calls that produce a single value.
-void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
-  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
+void CCState::AnalyzeCallResult(MVT VT, Type *OrigTy, CCAssignFn Fn) {
+  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), OrigTy, *this)) {
 #ifndef NDEBUG
     dbgs() << "Call result has unhandled type "
            << VT << '\n';
@@ -213,7 +215,8 @@ void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
   // location in memory.
   bool HaveRegParm;
   do {
-    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
+    Type *OrigTy = EVT(VT).getTypeForEVT(Context);
+    if (Fn(0, VT, VT, CCValAssign::Full, Flags, OrigTy, *this)) {
 #ifndef NDEBUG
       dbgs() << "Call has unhandled type " << VT
              << " while computing remaining regparms\n";
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 0f3ec8b10b02e..90a18b86c1b1f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -1099,7 +1099,7 @@ bool CallLowering::checkReturn(CCState &CCInfo,
                                CCAssignFn *Fn) const {
   for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
     MVT VT = MVT::getVT(Outs[I].Ty);
-    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
+    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], Outs[I].Ty, CCInfo))
       return false;
   }
   return true;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 0d1e95450a6b7..476fe18068430 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2273,8 +2273,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
           Flags.setNoExt();

         for (unsigned i = 0; i < NumParts; ++i) {
-          Outs.push_back(ISD::OutputArg(
-              Flags, Parts[i].getValueType().getSimpleVT(), VT, 0, 0));
+          Outs.push_back(ISD::OutputArg(Flags,
+                                        Parts[i].getValueType().getSimpleVT(),
+                                        VT, I.getOperand(0)->getType(), 0, 0));
           OutVals.push_back(Parts[i]);
         }
       }
@@ -2292,6 +2293,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
       Flags.setSwiftError();
     Outs.push_back(ISD::OutputArg(Flags, /*vt=*/TLI.getPointerTy(DL),
                                   /*argvt=*/EVT(TLI.getPointerTy(DL)),
+                                  PointerType::getUnqual(*DAG.getContext()),
                                   /*origidx=*/1, /*partOffs=*/0));
     // Create SDNode for the swifterror virtual register.
     OutVals.push_back(
@@ -11247,7 +11249,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       // For scalable vectors the scalable part is currently handled
       // by individual targets, so we just use the known minimum size here.
       ISD::OutputArg MyFlags(
-          Flags, Parts[j].getValueType().getSimpleVT(), VT, i,
+          Flags, Parts[j].getValueType().getSimpleVT(), VT, Args[i].Ty, i,
           j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
       if (NumParts > 1 && j == 0)
         MyFlags.Flags.setSplit();
@@ -11625,7 +11627,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
     ISD::ArgFlagsTy Flags;
     Flags.setSRet();
     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVT);
-    ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, true,
+    ISD::InputArg RetArg(Flags, RegisterVT, ValueVT, F.getReturnType(), true,
                          ISD::InputArg::NoArgIndex, 0);
     Ins.push_back(RetArg);
   }
@@ -11763,7 +11765,7 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       // are responsible for handling scalable vector arguments and
       // return values.
       ISD::InputArg MyFlags(
-          Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
+          Flags, RegisterVT, VT, Arg.getType(), isArgValueUsed, ArgNo,
           PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
       if (NumRegs > 1 && i == 0)
         MyFlags.Flags.setSplit();
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index bf4c9f91d1c97..21c1ef7768727 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1772,7 +1772,7 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
       Flags.setZExt();

     for (unsigned i = 0; i < NumParts; ++i)
-      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, 0, 0));
+      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, ReturnType, 0, 0));
   }
 }

diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
index 787a1a83613c9..cc46159915d76 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -75,8 +75,10 @@ static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
     auto &It = PendingMembers[0];
     CCAssignFn *AssignFn = TLI->CCAssignFnForCall(State.getCallingConv(),
                                                   /*IsVarArg=*/false);
+    // FIXME: Get the correct original type.
+    Type *OrigTy = EVT(It.getValVT()).getTypeForEVT(State.getContext());
     if (AssignFn(It.getValNo(), It.getValVT(), It.getValVT(), CCValAssign::Full,
-                 ArgFlags, State))
+                 ArgFlags, OrigTy, State))
       llvm_unreachable("Call operand has unhandled type");

     // Return the flags to how they were before.
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.h b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
index 63185a97cba03..7105fa695334b 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.h
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.h
@@ -18,52 +18,63 @@ namespace llvm {
 bool CC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                      CCState &State);
+                      Type *OrigTy, CCState &State);
 bool CC_AArch64_Arm64EC_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                                CCValAssign::LocInfo LocInfo,
-                               ISD::ArgFlagsTy ArgFlags, CCState &State);
+                               ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                               CCState &State);
 bool CC_AArch64_Arm64EC_Thunk(unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
-                              ISD::ArgFlagsTy ArgFlags, CCState &State);
+                              ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                              CCState &State);
 bool CC_AArch64_Arm64EC_Thunk_Native(unsigned ValNo, MVT ValVT, MVT LocVT,
                                      CCValAssign::LocInfo LocInfo,
-                                     ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                     ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                     CCState &State);
 bool CC_AArch64_DarwinPCS_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                                  CCValAssign::LocInfo LocInfo,
-                                 ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                 CCState &State);
 bool CC_AArch64_DarwinPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
-                          ISD::ArgFlagsTy ArgFlags, CCState &State);
+                          ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                          CCState &State);
 bool CC_AArch64_DarwinPCS_ILP32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
-                                      CCValAssign::LocInfo LocInfo,
-                                      ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                       CCValAssign::LocInfo LocInfo,
+                                       ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                       CCState &State);
 bool CC_AArch64_Win64PCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                         CCState &State);
+                         Type *OrigTy, CCState &State);
 bool CC_AArch64_Win64_VarArg(unsigned ValNo, MVT ValVT,
                              MVT LocVT, CCValAssign::LocInfo LocInfo,
-                             ISD::ArgFlagsTy ArgFlags, CCState &State);
+                             ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                             CCState &State);
 bool CC_AArch64_Win64_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
                                     CCValAssign::LocInfo LocInfo,
-                                    ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                    ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                    CCState &State);
 bool CC_AArch64_Arm64EC_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
                                       CCValAssign::LocInfo LocInfo,
-                                      ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                      ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                      CCState &State);
 bool CC_AArch64_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                    CCState &State);
+                    Type *OrigTy, CCState &State);
 bool CC_AArch64_Preserve_None(unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
-                              ISD::ArgFlagsTy ArgFlags, CCState &State);
+                              ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                              CCState &State);
 bool RetCC_AArch64_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                         CCState &State);
+                         Type *OrigTy, CCState &State);
 bool RetCC_AArch64_Arm64EC_Thunk(unsigned ValNo, MVT ValVT, MVT LocVT,
                                  CCValAssign::LocInfo LocInfo,
-                                 ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                 ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                 CCState &State);
 bool RetCC_AArch64_Arm64EC_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
                                          CCValAssign::LocInfo LocInfo,
-                                         ISD::ArgFlagsTy ArgFlags,
+                                         ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
                                          CCState &State);
 } // namespace llvm
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 9d74bb5a8661d..41ff169891e87 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -267,7 +267,7 @@ class AArch64FastISel final : public FastISel {
 private:
   CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
   bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
-                       unsigned &NumBytes);
+                       SmallVectorImpl<Type *> &OrigTys, unsigned &NumBytes);
   bool finishCall(CallLoweringInfo &CLI, unsigned NumBytes);

 public:
@@ -3011,11 +3011,13 @@ bool AArch64FastISel::fastLowerArguments() {

 bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
                                       SmallVectorImpl<MVT> &OutVTs,
+                                      SmallVectorImpl<Type *> &OrigTys,
                                       unsigned &NumBytes) {
   CallingConv::ID CC = CLI.CallConv;
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
-  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
+  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, OrigTys,
+                             CCAssignFnForCall(CC));

   // Get a count of how many bytes are to be pushed on the stack.
   NumBytes = CCInfo.getStackSize();
@@ -3194,6 +3196,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {

   // Set up the argument vectors.
   SmallVector<MVT, 16> OutVTs;
+  SmallVector<Type *, 16> OrigTys;
   OutVTs.reserve(CLI.OutVals.size());

   for (auto *Val : CLI.OutVals) {
@@ -3207,6 +3210,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
       return false;

     OutVTs.push_back(VT);
+    OrigTys.push_back(Val->getType());
   }

   Address Addr;
@@ -3222,7 +3226,7 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {

   // Handle the arguments now that we've gotten them.
   unsigned NumBytes;
-  if (!processCallArgs(CLI, OutVTs, NumBytes))
+  if (!processCallArgs(CLI, OutVTs, OrigTys, NumBytes))
     return false;

   const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3c06c6abe8d23..c7586abefa501 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7886,8 +7886,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
       else if (ActualMVT == MVT::i16)
        ValVT = MVT::i16;
     }
-    bool Res =
-        AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
+    bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags,
+                        Ins[i].OrigTy, CCInfo);
     assert(!Res && "Call operand has unhandled type");
     (void)Res;
   }
@@ -8557,7 +8557,8 @@ static void analyzeCallOperands(const AArch64TargetLowering &TLI,
     // FIXME: CCAssignFnForCall should be called once, for the call and not per
     // argument. This logic should exactly mirror LowerFormalArguments.
     CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CalleeCC, UseVarArgCC);
-    bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
+    bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
+                        Outs[i].OrigTy, CCInfo);
     assert(!Res && "Call operand has unhandled type");
     (void)Res;
   }
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index 2155acef42939..79bef76cf4c4f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -128,9 +128,9 @@ struct AArch64OutgoingValueAssigner
     if (!Flags.isVarArg() && !UseVarArgsCCForFixed) {
       if (!IsReturn)
         applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
-      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);
     } else
-      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);

     StackSize = State.getStackSize();
     return Res;
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.h b/llvm/lib/Target/ARM/ARMCallingConv.h
index 7c692f03b4405..b6b2d5928dabc 100644
--- a/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -19,34 +19,35 @@ namespace llvm {
 bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                  CCState &State);
+                  Type *OrigTy, CCState &State);
 bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                      CCState &State);
+                      Type *OrigTy, CCState &State);
 bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                 CCState &State);
+                 Type *OrigTy, CCState &State);
 bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                      CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                     CCState &State);
+                     Type *OrigTy, CCState &State);
 bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                      CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                     CCState &State);
+                     Type *OrigTy, CCState &State);
 bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
-                                ISD::ArgFlagsTy ArgFlags, CCState &State);
+                                ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                                CCState &State);
 bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                      CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                     CCState &State);
+                     Type *OrigTy, CCState &State);
 bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                         CCState &State);
+                         Type *OrigTy, CCState &State);
 bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                    CCState &State);
+                    Type *OrigTy, CCState &State);
 bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT,
                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                        CCState &State);
+                        Type *OrigTy, CCState &State);

 } // namespace llvm
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 7ba2487d2390d..54aa355d4db0a 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -1943,8 +1943,11 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                   unsigned &NumBytes,
                                   bool isVarArg) {
   SmallVector<CCValAssign, 16> ArgLocs;
+  SmallVector<Type *, 16> OrigTys;
+  for (Value *Arg : Args)
+    OrigTys.push_back(Arg->getType());
   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
-  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
+  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, OrigTys,
                              CCAssignFnForCall(CC, false, isVarArg));

   // Check that we can handle all of the arguments. If we can't, then bail out
@@ -2093,7 +2096,8 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
   if (RetVT != MVT::isVoid) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
-    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
+    CCInfo.AnalyzeCallResult(RetVT, I->getType(),
+                             CCAssignFnForCall(CC, true, isVarArg));

     // Copy all of the result registers out of their specified physreg.
     if (RVLocs.size() == 2 && RetVT == MVT::f64) {
@@ -2278,7 +2282,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
   if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
-    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
+    CCInfo.AnalyzeCallResult(RetVT, RetTy, CCAssignFnForCall(CC, true, false));
     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
       return false;
   }
@@ -2389,7 +2393,8 @@ bool ARMFastISel::SelectCall(const Instruction *I,
       RetVT != MVT::i16 && RetVT != MVT::i32) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
-    CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
+    CCInfo.AnalyzeCallResult(RetVT, RetTy,
+                             CCAssignFnForCall(CC, true, isVarArg));
     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
       return false;
   }
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index d23c5f43ad4ff..7a0a5103a23c9 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -356,12 +356,13 @@ void LanaiTargetLowering::LowerAsmOperandForConstraint(
 static unsigned NumFixedArgs;
 static bool CC_Lanai32_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
-                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
+                              ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                              CCState &State) {
   // Handle fixed arguments with default CC.
   // Note: Both the default and fast CC handle VarArg the same and hence the
   // calling convention of the function is not considered here.
   if (ValNo < NumFixedArgs) {
-    return CC_Lanai32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
+    return CC_Lanai32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State);
   }

   // Promote i8/i16 args to i32
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 6583a0fef3d61..14472419a10f4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -7073,7 +7073,8 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,

 static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
-                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
+                             ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                             CCState &State) {
   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, SpLim
     //                        s0 s1 s2 s3 s4 s5 s6 s7 s8
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index d23504c203dd3..6da5e66be4ad8 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -377,6 +377,7 @@ static void AnalyzeArguments(CCState &State,
   for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
     MVT ArgVT = Args[ValNo].VT;
     ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
+    Type *OrigTy = Args[ValNo].OrigTy;
     MVT LocVT = ArgVT;
     CCValAssign::LocInfo LocInfo = CCValAssign::Full;

@@ -411,7 +412,8 @@ static void AnalyzeArguments(CCState &State,
         RegsLeft -= 1;

       UsedStack = true;
-      CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
+      CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, OrigTy,
+                            State);
     } else if (Parts <= RegsLeft) {
       for (unsigned j = 0; j < Parts; j++) {
         MCRegister Reg = State.AllocateReg(RegList);
@@ -421,7 +423,8 @@ static void AnalyzeArguments(CCState &State,
     } else {
       UsedStack = true;
       for (unsigned j = 0; j < Parts; j++)
-        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
+        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, OrigTy,
+                              State);
     }
   }
 }
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index f3812d185ec92..a9ac0eae5dace 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -266,17 +266,19 @@ class MipsFastISel final : public FastISel {

 static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                    CCState &State) LLVM_ATTRIBUTE_UNUSED;
+                    Type *OrigTy, CCState &State) LLVM_ATTRIBUTE_UNUSED;

 static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
-                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
+                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                            CCState &State) {
   llvm_unreachable("should not be called");
 }

 static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
-                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
+                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                            CCState &State) {
   llvm_unreachable("should not be called");
 }

@@ -1144,8 +1146,12 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                    unsigned &NumBytes) {
   CallingConv::ID CC = CLI.CallConv;
   SmallVector<CCValAssign, 16> ArgLocs;
+  SmallVector<Type *, 16> ArgTys;
+  for (const ArgListEntry &Arg : CLI.Args)
+    ArgTys.push_back(Arg.Val->getType());
   CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
-  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
+  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, ArgTys,
+                             CCAssignFnForCall(CC));
   // Get a count of how many bytes are to be pushed on the stack.
   NumBytes = CCInfo.getStackSize();
   // This is the minimum argument area used for A0-A3.
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 881ba8e2f9eff..50c05223c9a1e 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3160,17 +3160,19 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
   return false;
 }

-static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
-                            MVT LocVT, CCValAssign::LocInfo LocInfo,
-                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
+static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
+                            CCValAssign::LocInfo LocInfo,
+                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                            CCState &State) {
   static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };

   return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
 }

-static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
-                            MVT LocVT, CCValAssign::LocInfo LocInfo,
-                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
+static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
+                            CCValAssign::LocInfo LocInfo,
+                            ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                            CCState &State) {
   static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };

   return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
@@ -3178,7 +3180,7 @@ static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
 }

 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                        CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                       CCState &State) LLVM_ATTRIBUTE_UNUSED;
+                       Type *OrigTy, CCState &State) LLVM_ATTRIBUTE_UNUSED;

 #include "MipsGenCallingConv.inc"
diff --git a/llvm/lib/Target/PowerPC/PPCCallingConv.h b/llvm/lib/Target/PowerPC/PPCCallingConv.h
index ab61472c72eb8..9c47142f78890 100644
--- a/llvm/lib/Target/PowerPC/PPCCallingConv.h
+++ b/llvm/lib/Target/PowerPC/PPCCallingConv.h
@@ -21,28 +21,29 @@ namespace llvm {
 bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT,
                CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-               CCState &State);
+               Type *OrigTy, CCState &State);
 bool RetCC_PPC64_ELF_FIS(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                         CCState &State);
+                         Type *OrigTy, CCState &State);
 bool RetCC_PPC_Cold(unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                    CCState &State);
+                    Type *OrigTy, CCState &State);
 bool CC_PPC32_SVR4(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                   CCState &State);
+                   Type *OrigTy, CCState &State);
 bool CC_PPC64_ELF(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                  CCState &State);
+                  Type *OrigTy, CCState &State);
 bool CC_PPC64_ELF_FIS(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                      CCState &State);
+                      Type *OrigTy, CCState &State);
 bool CC_PPC32_SVR4_ByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                         CCState &State);
+                         Type *OrigTy, CCState &State);
 bool CC_PPC32_SVR4_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
-                          ISD::ArgFlagsTy ArgFlags, CCState &State);
+                          ISD::ArgFlagsTy ArgFlags, Type *OrigTy,
+                          CCState &State);

 } // End llvm namespace
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index e92e00f80c552..0b68ba12e337f 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1374,7 +1374,10 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
   unsigned LinkageSize = Subtarget->getFrameLowering()->getLinkageSize();
   CCInfo.AllocateStack(LinkageSize, Align(8));

-  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
+  SmallVector<Type *, 16> ArgTys;
+  for (Value *Arg : Args)
+    ArgTys.push_back(Arg->getType());
+  CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, ArgTys, CC_PPC64_ELF_FIS);

   // Bail out if we can't handle any of the arguments.
   for (const CCValAssign &VA : ArgLocs) {
@@ -1487,7 +1490,7 @@ bool PPCFastISel::finishCall(MVT RetVT, CallLoweringInfo &CLI, unsigned &NumByte
   if (RetVT != MVT::isVoid) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
-    CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
+    CCInfo.AnalyzeCallResult(RetVT, CLI.RetTy, RetCC_PPC64_ELF_FIS);
     CCValAssign &VA = RVLocs[0];
     assert(RVLocs.size() == 1 && "No support for multi-reg return values!");
     assert(VA.isRegLoc() && "Can only return in registers!");
@@ -1573,7 +1576,7 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
       RetVT != MVT::f64) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, *Context);
-    CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
+    CCInfo.AnalyzeCallResult(RetVT, RetTy, RetCC_PPC64_ELF_FIS);
     if (RVLocs.size() > 1)
       return false;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2698bd6f37c59..74ae8502dccea 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6091,10 +6091,10 @@ SDValue PPCTargetLowering::LowerCall_32SVR4(

       if (!ArgFlags.isVarArg()) {
         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
-                               CCInfo);
+                               Outs[i].OrigTy, CCInfo);
       } else {
         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
-                                      ArgFlags, CCInfo);
+                                      ArgFlags, Outs[i].OrigTy, CCInfo);
       }

       if (Result) {
@@ -6905,7 +6905,7 @@ static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {

 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                   CCState &State) {
+                   Type *OrigTy, CCState &State) {
   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
       State.getMachineFunction().getSubtarget());
   const bool IsPPC64 = Subtarget.isPPC64();
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
index 70127e3beede5..78f47794a5b66 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
@@ -741,7 +741,7 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,

 bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                        CCState &State) {
+                        Type *OrigTy, CCState &State) {
   if (ArgFlags.isNest()) {
     report_fatal_error(
         "Attribute 'nest' is not supported in GHC calling convention");
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.h b/llvm/lib/Target/RISCV/RISCVCallingConv.h
index 2030ce1f1ac4b..0847dd6a12b82 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.h
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.h
@@ -33,7 +33,7 @@ bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,

 bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-                  CCState &State);
+                  Type *OrigTy, CCState &State);

 namespace RISCV {
diff --git a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
index d9f4405de04b2..c0b9339e9bc34 100644
--- a/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/GISel/X86CallLowering.cpp
@@ -69,7 +69,7 @@ struct X86OutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
                  CCValAssign::LocInfo LocInfo,
                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                  CCState &State) override {
-    bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
+    bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);
     StackSize = State.getStackSize();

     static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
diff --git a/llvm/lib/Target/X86/X86CallingConv.h b/llvm/lib/Target/X86/X86CallingConv.h
index 191e0fa619b24..8e37f345ed9e6 100644
--- a/llvm/lib/Target/X86/X86CallingConv.h
+++ b/llvm/lib/Target/X86/X86CallingConv.h
@@ -22,10 +22,10 @@ namespace llvm {

 bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT,
                CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
-               CCState &State);
+               Type *OrigTy, CCState &State);

 bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
-            ISD::ArgFlagsTy ArgFlags, CCState &State);
+            ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State);

 } // End llvm namespace
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 067bd43c8c866..f007886115d35 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3323,6 +3323,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     return false;

   SmallVector<MVT, 16> OutVTs;
+  SmallVector<Type *, 16> ArgTys;
   SmallVector<Register, 16> ArgRegs;

   // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
@@ -3369,6 +3370,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {

     ArgRegs.push_back(ResultReg);
     OutVTs.push_back(VT);
+    ArgTys.push_back(Val->getType());
   }

   // Analyze operands of the call, assigning locations to each operand.
@@ -3379,7 +3381,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   if (IsWin64)
     CCInfo.AllocateStack(32, Align(8));

-  CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
+  CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, ArgTys, CC_X86);

   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
diff --git a/llvm/utils/TableGen/CallingConvEmitter.cpp b/llvm/utils/TableGen/CallingConvEmitter.cpp
index 3084d90d444aa..e0d933723ad66 100644
--- a/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -77,7 +77,7 @@ void CallingConvEmitter::run(raw_ostream &O) {
       O << CC->getName() << "(unsigned ValNo, MVT ValVT,\n"
        << std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
        << std::string(Pad, ' ')
-       << "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
+       << "ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State);\n";
     }
   }

@@ -115,7 +115,8 @@ void CallingConvEmitter::emitCallingConv(const Record *CC, raw_ostream &O) {
   }
   O << CurrentAction << "(unsigned ValNo, MVT ValVT,\n"
     << std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
-    << std::string(Pad, ' ') << "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
+    << std::string(Pad, ' ') << "ISD::ArgFlagsTy ArgFlags, Type *OrigTy, "
+    << "CCState &State) {\n";

   // Emit all of the actions, in order.
   for (unsigned I = 0, E = CCActions->size(); I != E; ++I) {
     const Record *Action = CCActions->getElementAsRecord(I);
@@ -227,7 +228,7 @@ void CallingConvEmitter::emitAction(const Record *Action, indent Indent,
   if (Action->isSubClassOf("CCDelegateTo")) {
     const Record *CC = Action->getValueAsDef("CC");
     O << Indent << "if (!" << CC->getName()
-      << "(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))\n"
+      << "(ValNo, ValVT, LocVT, LocInfo, ArgFlags, OrigTy, State))\n"
      << Indent + 2 << "return false;\n";
     DelegateToMap[CurrentAction].insert(CC->getName().str());
   } else if (Action->isSubClassOf("CCAssignToReg") ||