
【Hackathon 7th Fundable Projects 2 No.73】 [fluid_ops] load_combine #68665


Merged: 1 commit, merged on Oct 23, 2024
9 changes: 9 additions & 0 deletions paddle/fluid/framework/infershape_utils.cc
@@ -140,6 +140,15 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
});
}

bool IsVocabOutput(const std::string& name) const override {
auto var_types = ctx_.GetOutputsVarType(name);
return std::all_of(var_types.begin(),
var_types.end(),
[](const proto::VarType::Type& type) {
return type == proto::VarType::VOCAB;
});
}

bool IsSelectedRowsOutput(const std::string& name) const override {
auto var_types = ctx_.GetOutputsVarType(name);
return std::all_of(var_types.begin(),
10 changes: 6 additions & 4 deletions paddle/fluid/framework/new_executor/interpreter/static_build.cc
@@ -186,8 +186,8 @@ bool BlockCanBeStaticBuilt(const framework::BlockDesc& block) {
}

inline bool IsExtendedTensor(const phi::TensorBase& tensor) {
return framework::RawTensor::classof(&tensor) ||
phi::Strings::classof(&tensor) || phi::Vocab::classof(&tensor);
return phi::RawTensor::classof(&tensor) || phi::Strings::classof(&tensor) ||
phi::Vocab::classof(&tensor);
}

bool TensorShouldBeFakeInitialized(const OperatorBase& op,
@@ -281,9 +281,11 @@ phi::TensorBase* GetTensorFormVar(framework::Variable* var) {
return var->template GetMutable<phi::TensorArray>();
} else if (var->template IsType<phi::Strings>()) {
return var->template GetMutable<phi::Strings>();
} else if (var->template IsType<paddle::framework::RawTensor>() ||
} else if (var->template IsType<phi::Vocab>()) {
return var->template GetMutable<phi::Vocab>();
} else if (var->template IsType<phi::RawTensor>() ||
!var->IsInitialized()) {
return var->template GetMutable<paddle::framework::RawTensor>();
return var->template GetMutable<phi::RawTensor>();
} else {
PADDLE_THROW(common::errors::Unimplemented(
"Unsupported `%s` type when get tensor.",
8 changes: 8 additions & 0 deletions paddle/fluid/framework/operator.h
@@ -39,6 +39,7 @@ limitations under the License. */
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/phi/core/memory/malloc.h"
#include "paddle/phi/core/platform/device_context.h"
#include "paddle/phi/core/vocab/string_array.h"

#include "paddle/common/flags.h"
#include "paddle/common/macros.h"
@@ -697,6 +698,13 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
});
}

bool IsVocabOutput(const std::string& name) const override {
auto vars = ctx_.MultiOutputVar(name);
return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
return var->IsType<phi::Vocab>();
});
}

bool IsSelectedRowsOutput(const std::string& name) const override {
auto vars = ctx_.MultiOutputVar(name);
return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
1 change: 0 additions & 1 deletion paddle/fluid/framework/type_info.cc
@@ -38,7 +38,6 @@ bool TypeInfoTraits<BaseT, DerivedT>::classof(const BaseT* obj) {
return obj->type_info() == kType;
}

template class TypeInfoTraits<phi::TensorBase, paddle::framework::RawTensor>;
template class TypeInfoTraits<phi::TensorBase, egr::VariableCompatTensor>;
template class TypeInfoTraits<phi::TensorBase, paddle::prim::DescTensor>;
template class TypeInfoTraits<phi::TensorBase, paddle::primitive::LazyTensor>;
6 changes: 6 additions & 0 deletions paddle/fluid/framework/variable.h
@@ -49,6 +49,12 @@ class Variable {
if (!holder_) {
holder_.reset(new PlaceholderImpl<T>());
} else {
// If holder_ is a RawTensor but a different type T is requested, forward the
// call to RawTensor::GetMutable<T>() on holder_->Ptr(). Used for load_combine.
if (holder_->Type() == VarTypeTrait<RawTensor>::kId &&
holder_->Type() != VarTypeTrait<T>::kId) {
return static_cast<RawTensor*>(holder_->Ptr())->GetMutable<T>();
}
PADDLE_ENFORCE_EQ(
holder_->Type(),
VarTypeTrait<T>::kId,
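The hunk above is the core trick of this PR: a variable that load_combine first materializes as a RawTensor can later be re-requested as its real type without tripping the holder-type check. A minimal sketch of the intended behavior, assuming the paddle::framework::Variable and phi::RawTensor / phi::Vocab types touched elsewhere in this diff (illustrative only, not code from the PR):

// Sketch, not an exact test from this PR.
#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/raw_tensor.h"
#include "paddle/phi/core/vocab/string_array.h"

void LoadCombineHolderSketch() {
  paddle::framework::Variable var;

  // load_combine first materializes the output as a RawTensor because the
  // concrete type is not known until the bytes are read from the file.
  var.GetMutable<phi::RawTensor>();

  // Later the real type is requested. With this patch, the RawTensor holder
  // forwards the call through RawTensor::GetMutable<T>() instead of failing
  // the holder-type PADDLE_ENFORCE_EQ check.
  phi::Vocab* vocab = var.GetMutable<phi::Vocab>();
  (void)vocab;
}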
11 changes: 0 additions & 11 deletions paddle/fluid/operators/load_combine_op.cc
@@ -81,14 +81,3 @@ namespace ops = paddle::operators; // NOLINT
REGISTER_OPERATOR(load_combine,
ops::LoadCombineOp,
ops::LoadCombineOpProtoMaker);

PD_REGISTER_STRUCT_KERNEL(load_combine,
CPU,
ALL_LAYOUT,
ops::LoadCombineOpKernel,
float,
double,
phi::dtype::bfloat16,
int,
int8_t,
int64_t) {}
26 changes: 0 additions & 26 deletions paddle/fluid/operators/load_combine_op.cu

This file was deleted.

26 changes: 0 additions & 26 deletions paddle/fluid/operators/load_combine_op_xpu.cc

This file was deleted.

41 changes: 41 additions & 0 deletions paddle/fluid/operators/ops_signature/load_combine_sig.cc
@@ -0,0 +1,41 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

KernelSignature LoadCombineOpArgumentMapping(
const ArgumentMappingContext& ctx) {
if (ctx.IsDenseTensorOutput("Out")) {
return KernelSignature("load_combine",
{},
{"file_path", "load_as_fp16", "model_from_memory"},
{"Out"});
} else if (ctx.IsVocabOutput("Out")) {
return KernelSignature("load_combine_vocab",
{},
{"file_path", "load_as_fp16", "model_from_memory"},
{"Out"});
} else {
return KernelSignature("load_combine_extended",
{},
{"file_path", "load_as_fp16", "model_from_memory"},
{"Out"});
}
}

} // namespace phi

PD_REGISTER_ARG_MAPPING_FN(load_combine, phi::LoadCombineOpArgumentMapping);
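Note: this mapping is what routes the op to the three kernels registered below. Dense-tensor "Out" variables keep the plain load_combine kernel, Vocab outputs go to load_combine_vocab, and anything else falls back to load_combine_extended.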
1 change: 0 additions & 1 deletion paddle/fluid/pir/dialect/operator/utils/utils.cc
@@ -36,7 +36,6 @@ namespace paddle {
namespace dialect {

const std::unordered_set<std::string> LegacyOpList = {
LoadCombineOp::name(),
CConcatOp::name(),
CBroadcast_Op::name(),
CBroadcastOp::name(),
1 change: 1 addition & 0 deletions paddle/phi/core/compat/arg_map_context.h
@@ -119,6 +119,7 @@ class ArgumentMappingContext {

virtual bool IsDenseTensorOutput(const std::string& name) const = 0;
virtual bool IsSelectedRowsOutput(const std::string& name) const = 0;
virtual bool IsVocabOutput(const std::string& name) const { return false; }

// use this function to mark it comes from InferShapeArgumentMappingContext
// and will be used in infershape
7 changes: 5 additions & 2 deletions paddle/phi/core/kernel_registry.cc
@@ -218,8 +218,11 @@ void SetKernelArgsDef(const std::vector<std::type_index>& args_type,
default_tensor_layout,
default_key.dtype(),
arg_type);
} else if (arg_type ==
std::type_index(typeid(ExtendedTensor*))) { // NOLINT
} else if (arg_type == std::type_index(typeid(ExtendedTensor*)) ||
arg_type ==
std::type_index(typeid(std::vector<ExtendedTensor*>)) ||
arg_type ==
std::type_index(typeid(std::vector<Vocab*>))) { // NOLINT
args_def->AppendOutput(default_key.backend(),
default_tensor_layout,
default_key.dtype(),
2 changes: 2 additions & 0 deletions paddle/phi/core/kernel_utils.h
@@ -387,6 +387,8 @@ struct KernelImpl<Return (*)(DevCtx, Args...), kernel_fn> {

PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(TensorArray);
PD_SPECIALIZE_KernelCallHelper_FOR_OUTPUT(ExtendedTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(ExtendedTensor);
PD_SPECIALIZE_KernelCallHelper_FOR_MULTI_OUTPUT(Vocab);

/* End case */
template <typename T>
2 changes: 2 additions & 0 deletions paddle/phi/core/utils/type_info.cc
@@ -20,6 +20,7 @@ limitations under the License. */
#include "paddle/phi/backends/xpu/xpu_context.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
#include "paddle/phi/core/framework/feed_fetch_type.h"
#include "paddle/phi/core/raw_tensor.h"
#include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
@@ -54,6 +55,7 @@ template class TypeInfoTraits<phi::TensorBase, TensorArray>;
template class TypeInfoTraits<phi::TensorBase, phi::distributed::DistTensor>;
template class TypeInfoTraits<phi::TensorBase, Vocab>;
template class TypeInfoTraits<phi::TensorBase, Strings>;
template class TypeInfoTraits<phi::TensorBase, RawTensor>;
template class TypeInfoTraits<phi::TensorBase, FeedList>;

template class TypeInfoTraits<phi::DeviceContext, CPUContext>;
48 changes: 48 additions & 0 deletions paddle/phi/kernels/cpu/load_combine_kernel.cc
@@ -0,0 +1,48 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/impl/load_combine_kernel_impl.h"

PD_REGISTER_KERNEL(load_combine,
CPU,
ALL_LAYOUT,
phi::LoadCombineKernel,
float,
double,
phi::dtype::bfloat16,
int,
int8_t,
int64_t) {}

PD_REGISTER_KERNEL(load_combine_vocab,
CPU,
ALL_LAYOUT,
phi::LoadCombineVocabKernel,
float,
double,
phi::dtype::bfloat16,
int,
int8_t,
int64_t) {}

PD_REGISTER_KERNEL(load_combine_extended,
CPU,
ALL_LAYOUT,
phi::LoadCombineExtendedKernel,
float,
double,
phi::dtype::bfloat16,
int,
int8_t,
int64_t) {}
45 changes: 45 additions & 0 deletions paddle/phi/kernels/gpu/load_combine_kernel.cu
@@ -0,0 +1,45 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/impl/load_combine_kernel_impl.h"

PD_REGISTER_KERNEL(load_combine,
GPU,
ALL_LAYOUT,
phi::LoadCombineKernel,
float,
double,
int,
int8_t,
int64_t) {}

PD_REGISTER_KERNEL(load_combine_vocab,
GPU,
ALL_LAYOUT,
phi::LoadCombineVocabKernel,
float,
double,
int,
int8_t,
int64_t) {}

PD_REGISTER_KERNEL(load_combine_extended,
GPU,
ALL_LAYOUT,
phi::LoadCombineExtendedKernel,
float,
double,
int,
int8_t,
int64_t) {}