[fluid_ops] clean push_dense yaml config #72056

Status: Merged (4 commits, Apr 7, 2025)
9 changes: 0 additions & 9 deletions paddle/fluid/ir_adaptor/translator/op_compat_gen.py
@@ -181,15 +181,6 @@ def insert_new_mutable_attributes(
"out": "Out",
}
op_arg_name_mappings['fused_softmax_mask_grad'].update({"out": "Softmax"})
op_arg_name_mappings['push_sparse_v2'].update(
{"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
)
op_arg_name_mappings['push_box_sparse'].update(
{"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
)
op_arg_name_mappings['push_gpups_sparse'].update(
{"out_grad": "Out@GRAD", "out_grad_grad": "Out@GRAD"}
)

sparse_op_yaml_files = sparse_op_yaml_file.split(",")
for yaml_file in sparse_op_yaml_files:
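
Note: the deleted update calls above hard-wired per-op argument renames for the legacy sparse push ops, pointing their PIR-side gradient arguments at the fluid Out@GRAD slot. A minimal Python sketch of how such a rename table behaves, using data from the deleted lines; the to_legacy_name helper is illustrative, not a function in the generator:

from collections import defaultdict

# Per-op rename table: PIR argument name -> legacy fluid variable name.
op_arg_name_mappings = defaultdict(dict)
op_arg_name_mappings['push_sparse_v2'].update(
    {"out_grad_in": "Out@GRAD", "out_grad_out": "Out@GRAD"}
)

def to_legacy_name(op_name, pir_arg):
    # Arguments without a registered override keep their PIR name.
    return op_arg_name_mappings[op_name].get(pir_arg, pir_arg)

assert to_legacy_name('push_sparse_v2', 'out_grad_in') == 'Out@GRAD'
assert to_legacy_name('push_sparse_v2', 'ids') == 'ids'
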
12 changes: 0 additions & 12 deletions paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
@@ -197,10 +197,6 @@
     'lod_reset_',
     'max_pool2d_v2',
     'partial_sum',
-    'pull_gpups_sparse',
-    'pull_gpups_sparse_',
-    'push_gpups_sparse',
-    'push_gpups_sparse_',
     'random_routing',
     'rnn_',
     'row_conv',
@@ -214,25 +210,17 @@
     'match_matrix_tensor',
     'c_scatter',
     "cross_entropy_grad2",
-    'push_sparse_v2',
-    'push_sparse_v2_',
-    'pull_sparse_v2',
     'partial_concat',
     'partial_send',
     'partial_recv',
     'partial_allgather',
     'partial_allgather_',
     'gemm_epilogue',
-    'push_dense',
     'legacy_matmul',
     'legacy_matmul_grad',
     'legacy_matmul_double_grad',
     'global_scatter',
     'global_gather',
-    'pull_box_sparse',
-    'pull_box_sparse_',
-    'push_box_sparse',
-    'push_box_sparse_',
     'send_and_recv',
     'send_and_recv_',
     'straight_through_estimator',
1 change: 0 additions & 1 deletion paddle/phi/api/generator/api_gen.py
@@ -18,7 +18,6 @@
 from api_base import PREFIX_TENSOR_NAME, BaseAPI

 backward_api_black_list = [
-    "pull_sparse_v2_grad", # tensor = push_sparse_v2() is not implemented in api_custom_impl.cc
     "scale_grad", # tensor = scale is not implemented in api_custom_impl.cc
 ]

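
Note: with pull_sparse_v2_grad removed, scale_grad is the only remaining entry in backward_api_black_list. As a rough sketch of the mechanism (function and field names here are assumptions, not the generator's real internals), such a list simply short-circuits code generation for backward APIs whose C++ implementation is hand-written in api_custom_impl.cc:

backward_api_black_list = ["scale_grad"]

def emit_backward_apis(parsed_ops):
    rendered = []
    for op in parsed_ops:
        # Skip ops that must be implemented by hand in api_custom_impl.cc.
        if op["name"] in backward_api_black_list:
            continue
        rendered.append(f"void {op['name']}_api(...);")  # placeholder rendering
    return rendered
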
16 changes: 0 additions & 16 deletions paddle/phi/ops/yaml/backward.yaml
@@ -2538,22 +2538,6 @@
     data_type : x
   optional : boxes_num

-- backward_op : pull_sparse_v2_grad
-  forward : pull_sparse_v2 (Tensor[] ids, Tensor[] w, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctrlabel_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true) -> Tensor[](out)
-  args : (Tensor[] ids, Tensor[] w, Tensor[] out_grad, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctrlabel_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
-  output : Tensor[](out_grad_out)
-  invoke : push_sparse_v2(ids, w, out_grad, embedding_dim, table_id, accessor_class, ctrlabel_name, padding_id, scale_sparse_grad, input_names, is_distributed)
-
-- backward_op : push_gpups_sparse
-  forward : pull_gpups_sparse (Tensor w, Tensor[] ids, int[] size={}, bool is_sparse=false, bool is_distributed=false) -> Tensor[](out)
-  args : (Tensor[] ids, Tensor[] out_grad, int[] size, bool is_sparse, bool is_distributed)
-  output : Tensor[](out_grad_grad){out_grad.size()}
-  infer_meta :
-    func : PushGpupsSparseInferMeta
-  kernel :
-    func : push_gpups_sparse
-  inplace : (out_grad -> out_grad_grad)
-
 - backward_op : put_along_axis_grad
   forward : put_along_axis (Tensor arr, Tensor indices, Tensor values, int axis, str reduce = "assign", bool include_self = true) -> Tensor(out)
   args : (Tensor arr, Tensor indices, Tensor values, Tensor out, Tensor out_grad, int axis, str reduce, bool include_self)
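
Note: the two deleted entries show the two backward styles used in these YAML files. pull_sparse_v2_grad was invoke-based, composing its gradient from a call to the push_sparse_v2 op, while push_gpups_sparse was kernel-based, declaring its own infer_meta/kernel pair and writing out_grad_grad in place over out_grad. A rough Python analogy with stand-in stubs, not Paddle internals:

def push_sparse_v2(ids, w, out_grad, **attrs):
    # Stand-in stub: the real op scatters gradients back into the table.
    return list(out_grad)

def pull_sparse_v2_grad(ids, w, out_grad, **attrs):
    # invoke-style backward: composed entirely from an existing op.
    return push_sparse_v2(ids, w, out_grad, **attrs)

def push_gpups_sparse(ids, out_grad, size, is_sparse, is_distributed):
    # kernel-style backward: run shape inference, then the kernel, in place.
    out_grad_grad = out_grad  # inplace : (out_grad -> out_grad_grad)
    return out_grad_grad
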
12 changes: 0 additions & 12 deletions paddle/phi/ops/yaml/inconsistent/static_backward.yaml
@@ -470,18 +470,6 @@
   output : Tensor(in_grad)
   invoke : print(out_grad, first_n, message, summarize, print_tensor_name, print_tensor_type, print_tensor_shape, print_tensor_layout, print_tensor_lod, print_phase, is_forward)

-- backward_op : push_box_sparse
-  forward : pull_box_sparse (Tensor w, Tensor[] ids, bool is_sparse = false, bool is_distributed = false, int size = 1) -> Tensor[](out){ids.size()}
-  args : (Tensor[] ids, Tensor[] out_grad_in, bool is_sparse = false, bool is_distributed = false, int size = 1)
-  output : Tensor[](out_grad_out){out_grad_in.size()}
-  infer_meta :
-    func : UnchangedMultiInferMeta
-    param : [out_grad_in]
-  kernel :
-    func : push_box_sparse
-    data_type : out_grad_in
-  inplace : (out_grad_in -> out_grad_out)
-
 - backward_op : remainder_grad
   forward : remainder (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
51 changes: 0 additions & 51 deletions paddle/phi/ops/yaml/inconsistent/static_ops.yaml
@@ -631,57 +631,6 @@
   traits : pir::SideEffectTrait
   backward: print_grad

-- op : pull_box_sparse
-  args : (Tensor w, Tensor[] ids, bool is_sparse = false, bool is_distributed = false, int size = 1)
-  output : Tensor[](out){ids.size()}
-  infer_meta :
-    func : PullBoxSparseInferMeta
-  kernel :
-    func : pull_box_sparse
-    data_type : ids
-
-- op : pull_gpups_sparse
-  args : (Tensor w, Tensor[] ids, int[] size={}, bool is_sparse=false, bool is_distributed=false)
-  output : Tensor[](out){ids.size()}
-  infer_meta :
-    func : PullGpupsSparseInferMeta
-  kernel :
-    func : pull_gpups_sparse
-    data_type : PullGpupsSparseKernelKey
-  optional : w
-  backward: push_gpups_sparse
-
-- op : pull_sparse_v2
-  args : (Tensor[] ids, Tensor[] w, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
-  output : Tensor[](out){w.size()}
-  infer_meta :
-    func : PullSparseV2InferMeta
-  kernel :
-    func : pull_sparse_v2
-    data_type : DataType::FLOAT32
-  backward : pull_sparse_v2_grad
-
-- op : push_dense
-  args : (Tensor[] ids, int table_id = -1, float scale_data_norm = -1.0f, str[] input_names = {})
-  output :
-  infer_meta :
-    func : PushDenseInferMeta
-    param : [ids, table_id, scale_data_norm, input_names]
-  kernel :
-    func : push_dense
-    data_type : DataType::FLOAT32
-
-- op : push_sparse_v2
-  args : (Tensor[] ids, Tensor[] w, Tensor[] out_grad_in, int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, str[] input_names = {}, bool is_distributed = true)
-  output : Tensor[](out_grad_out){out_grad_in.size()}
-  infer_meta :
-    func : UnchangedMultiInferMeta
-    param : [out_grad_in]
-  kernel :
-    func : push_sparse_v2
-    data_type : out_grad_in
-  inplace: (out_grad_in -> out_grad_out)
-
 # Note: dequantize_linear and quantize_linear are supported using one op maker in fluid, the out_scale can't be used in dequantize_linear
 # so ,the out_scale is optional. Currently, we can't modify the op definition of dequantize_linear/quantize_linear and it can cause incompatibility problem
 # We need modify dequantize_linear/quantize_linear yaml and make it more reasonable when we abandon Fluid op.
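
Note: every entry removed above follows the same record shape: an op name, a C++-style args signature, an output list (empty for push_dense, which only has side effects), an infer_meta function for shape and dtype inference, and a kernel registration. A minimal sketch of that record using the push_dense entry as data; the OpDef dataclass is illustrative, the real generators work on raw YAML dicts:

from dataclasses import dataclass, field

@dataclass
class OpDef:
    name: str
    args: str    # signature string, as in the YAML above
    output: str  # empty: push_dense produces no tensor output
    infer_meta: dict = field(default_factory=dict)
    kernel: dict = field(default_factory=dict)

push_dense = OpDef(
    name="push_dense",
    args="(Tensor[] ids, int table_id = -1, float scale_data_norm = -1.0f, str[] input_names = {})",
    output="",
    infer_meta={
        "func": "PushDenseInferMeta",
        "param": ["ids", "table_id", "scale_data_norm", "input_names"],
    },
    kernel={"func": "push_dense", "data_type": "DataType::FLOAT32"},
)
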
47 changes: 0 additions & 47 deletions paddle/phi/ops/yaml/op_compat.yaml
@@ -3025,53 +3025,6 @@
   outputs :
     out : Out

-- op : pull_box_sparse
-  inputs :
-    { w : W, ids: Ids}
-  outputs :
-    out : Out
-  attrs :
-    sparse : is_sparse
-  extra :
-    attrs : [bool is_sparse = false, bool is_distributed = false, int size = 1]
-
-- op : pull_gpups_sparse
-  backward : push_gpups_sparse
-  inputs :
-    {w : W, ids : Ids}
-  outputs :
-    out : Out
-
-- op : pull_sparse_v2
-  inputs :
-    { ids : Ids, w : W}
-  outputs :
-    out : Out
-  extra :
-    attrs : [int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_label_name = "", int padding_id = 0, bool scale_sparse_grad = true, 'str[] input_names = {}', bool is_distributed = true]
-
-- op : push_box_sparse
-  inputs :
-    ids: Ids
-  outputs :
-    out : Out
-  attrs :
-    sparse : is_sparse
-
-- op : push_dense
-  inputs :
-    ids : Ids
-  attrs :
-    {table_id : TableId, scale_data_norm : ScaleDataNorm, input_names: InputNames}
-
-- op : push_sparse_v2
-  inputs :
-    { x : Ids, W : w}
-  outputs :
-    out : Out
-  extra :
-    attrs : [int embedding_dim = 11, int table_id = 0, str accessor_class = "", str ctr_labe_lname = "", int padding_id = 0, bool scales_parse_grad = true, 'str[] input_names = {}', bool is_distributed = true]
-
 - op : put_along_axis
   backward : put_along_axis_grad
   inputs :
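
Note: the op_compat.yaml entries deleted above are pure rename tables between PIR argument names and legacy fluid variable or attribute names; the deleted push_sparse_v2 entry even carried apparent typos (ctr_labe_lname, scales_parse_grad, and the inverted { x : Ids, W : w} mapping), all of which this cleanup removes outright. As plain data, a deleted entry looks like the following sketch, with the nested extra/attrs list flattened to extra_attrs for illustration:

# Taken from the deleted pull_box_sparse block above; illustrative only.
# Each section renames a PIR argument (left) to its fluid counterpart (right).
pull_box_sparse_compat = {
    "inputs": {"w": "W", "ids": "Ids"},
    "outputs": {"out": "Out"},
    "attrs": {"sparse": "is_sparse"},
    "extra_attrs": [
        "bool is_sparse = false",
        "bool is_distributed = false",
        "int size = 1",
    ],
}
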