Skip to content

Commit 273783b

Browse files
longranger2, zhwesky2010, ForFishes, Liyulingyue, SigureMo
authored
remove op.py in fluid (#52248)
* remove op.py * [Zero-Dim] change Tensor.numpy() usage to other equivalent usage, avoid hack (#52197) * [BugFix] fix compute error in fused_dropout_add (#52261) * fix bg * add utest * add utest * [CodeStyle][UP034] remove (()) cases (#52060) * add up34 * modify var name in loop * revert changes in test_slice * Revert "modify var name in loop" This reverts commit 6d748e3. * temporarily ignore test_slice.py * add comment * empty commit, re-trigger all ci * fix inc --------- Co-authored-by: SigureMo <sigure.qaq@gmail.com> * [AMP OP&Test] add unittest for log_softmax (#52264) * Fix_Linux_[-Wterminate]warning (#52186) * [CustomOP Inplace] Automap inplace dtype and shape, prepare for vector<Tensor> output (#52214) * [CustomOP Inplace] Automap inplace dtype and shape, prepare for vector<Tensor> output * delete dtype,shape func of multi_inplace op * [CustomOP Inplace] Automap inplace dtype and shape, support vector<Tensor> output * [CustomOP Inplace] Auto-generate python API for inplace vector<Tensor> output * [AMP OP&Test] add float16 optest for reshape_op (#51678) * [AMP OP&Test] add float16 optest for reshape_op * add public_python_api * [AMP OP&Test] Add fp16/bf16 to clip op (#52158) * add fp16/bf16 to clip op * fix as reviewed * update test_clip_op.py * update test_clip_op.py * fix bug * fix code style * fix bug * fix bug --------- Co-authored-by: Zhou Wei <1183042833@qq.com> Co-authored-by: ShenLiang <1422485404@qq.com> Co-authored-by: 张春乔 <83450930+Liyulingyue@users.noreply.github.com> Co-authored-by: SigureMo <sigure.qaq@gmail.com> Co-authored-by: Ccc <52520497+juncaipeng@users.noreply.github.com> Co-authored-by: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com> Co-authored-by: HongyuJia <jiahongyu@baidu.com> Co-authored-by: zhaoyingli <86812880+zhaoyinglia@users.noreply.github.com> Co-authored-by: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
1 parent c85a0c5 commit 273783b

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

43 files changed

+51
-115
lines changed

python/paddle/fluid/tests/unittests/benchmark.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,7 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest
19-
20-
from paddle.fluid.op import Operator
19+
from op import Operator
2120

2221

2322
class BenchmarkSuite(OpTest):

python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_momentum_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
from paddle import fluid
2020
from paddle.fluid import core
21-
from paddle.fluid.op import Operator
21+
from paddle.fluid.tests.unittests.op import Operator
2222

2323

2424
class TestDGCMomentumOp1(unittest.TestCase):

python/paddle/fluid/tests/unittests/collective/fleet/test_dgc_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
from paddle import fluid
2020
from paddle.fluid import core
21-
from paddle.fluid.op import Operator
21+
from paddle.fluid.tests.unittests.op import Operator
2222

2323
g_array_size = 102400
2424

python/paddle/fluid/tests/unittests/collective/fleet/test_recv_save_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from paddle import fluid
2626
from paddle.fluid import core
2727
from paddle.fluid.framework import Program, program_guard
28-
from paddle.fluid.op import Operator
28+
from paddle.fluid.tests.unittests.op import Operator
2929
from paddle.incubate.distributed.fleet.parameter_server.mode import (
3030
DistributedMode,
3131
)

python/paddle/fluid/tests/unittests/eager_op_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
from copy import copy
2424

2525
import numpy as np
26+
from op import Operator
2627

2728
import paddle
2829
from paddle import fluid
@@ -35,7 +36,6 @@
3536
_current_expected_place,
3637
canonicalize_attrs,
3738
)
38-
from paddle.fluid.op import Operator
3939

4040
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
4141
from prim_op_test import OpTestUtils, PrimForwardChecker, PrimGradChecker

python/paddle/fluid/op.py renamed to python/paddle/fluid/tests/unittests/op.py

+5-70
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,9 @@
1414

1515
import numpy as np
1616

17-
import paddle.fluid.core as core
18-
import paddle.fluid.proto.framework_pb2 as framework_pb2
17+
from paddle.fluid import core
18+
from paddle.fluid.proto import framework_pb2
19+
1920

2021
# NOTE: this is added to support creating a Scalar message
2122
# from a python number
@@ -256,13 +257,13 @@ def __impl__(*args, **kwargs):
256257
inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
257258
outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
258259
attrs=[attr.name for attr in op_proto.attrs],
259-
extra_attrs=[item for item in extra_attrs_map.keys()],
260+
extra_attrs=list(extra_attrs_map.keys()),
260261
)
261262

262263

263264
class OperatorFactory:
264265
def __init__(self):
265-
self.op_methods = dict()
266+
self.op_methods = {}
266267

267268
for op_proto in get_all_op_protos():
268269
method = create_op_creation_method(op_proto)
@@ -313,70 +314,4 @@ def get_op_extra_attr_names(self, type):
313314
return self.get_op_info(type).extra_attrs
314315

315316

316-
class __RecurrentOp__:
317-
__proto__ = None
318-
type = "recurrent"
319-
320-
def __init__(self):
321-
# cache recurrent_op's proto
322-
if self.__proto__ is None:
323-
for op_proto in get_all_op_protos():
324-
if op_proto.type == self.type:
325-
self.__proto__ = op_proto
326-
327-
def __call__(self, *args, **kwargs):
328-
if self.type not in args and "type" not in kwargs:
329-
kwargs["type"] = self.type
330-
# create proto
331-
create_method = OpDescCreationMethod(self.__proto__)
332-
proto = create_method(*args, **kwargs)
333-
# create rnnop
334-
return core.RecurrentOp.create(proto.SerializeToString())
335-
336-
337-
class __DynamicRecurrentOp__:
338-
__proto__ = None
339-
type = "dynamic_recurrent"
340-
341-
def __init__(self):
342-
# cache recurrent_op's proto
343-
if self.__proto__ is None:
344-
for op_proto in get_all_op_protos():
345-
if op_proto.type == self.type:
346-
self.__proto__ = op_proto
347-
348-
def __call__(self, *args, **kwargs):
349-
if self.type not in args and "type" not in kwargs:
350-
kwargs["type"] = self.type
351-
# create proto
352-
create_method = OpDescCreationMethod(self.__proto__)
353-
proto = create_method(*args, **kwargs)
354-
# create rnnop
355-
return core.DynamicRecurrentOp.create(proto.SerializeToString())
356-
357-
358-
class __CondOp__:
359-
__proto__ = None
360-
type = "cond"
361-
362-
def __init__(self):
363-
# cache recurrent_op's proto
364-
if self.__proto__ is None:
365-
for op_proto in get_all_op_protos():
366-
if op_proto.type == self.type:
367-
self.__proto__ = op_proto
368-
369-
def __call__(self, *args, **kwargs):
370-
if self.type not in args and "type" not in kwargs:
371-
kwargs["type"] = self.type
372-
# create proto
373-
create_method = OpDescCreationMethod(self.__proto__)
374-
proto = create_method(*args, **kwargs)
375-
# create condop
376-
return core.CondOp.create(proto.SerializeToString())
377-
378-
379317
Operator = OperatorFactory() # The default global factory
380-
RecurrentOp = __RecurrentOp__()
381-
DynamicRecurrentOp = __DynamicRecurrentOp__()
382-
CondOp = __CondOp__()

python/paddle/fluid/tests/unittests/test_activation_sparse_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
import paddle
2021
from paddle.fluid import core
21-
from paddle.fluid.op import Operator
2222

2323

2424
class TestSparseSquareOp(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_adagrad_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,10 @@
1717

1818
import numpy as np
1919
from eager_op_test import OpTest
20+
from op import Operator
2021

2122
import paddle
2223
from paddle.fluid import core
23-
from paddle.fluid.op import Operator
2424

2525

2626
def adamgrad_wrapper(

python/paddle/fluid/tests/unittests/test_adam_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest
19+
from op import Operator
1920

2021
import paddle
2122
from paddle import fluid
2223
from paddle.fluid import core
23-
from paddle.fluid.op import Operator
2424

2525

2626
def adam_wrapper(

python/paddle/fluid/tests/unittests/test_batch_norm_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,12 @@
1717

1818
import numpy as np
1919
from eager_op_test import OpTest, _set_use_system_allocator
20+
from op import Operator
2021

2122
import paddle
2223
from paddle import fluid
2324
from paddle.fluid import Program, core, program_guard
2425
from paddle.fluid.framework import grad_var_name
25-
from paddle.fluid.op import Operator
2626

2727
_set_use_system_allocator(True)
2828

python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
from paddle.fluid import core
20-
from paddle.fluid.op import Operator
2121

2222

2323
class TestBeamSearchDecodeOp(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_beam_search_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
from paddle.fluid import core
20-
from paddle.fluid.op import Operator
2121

2222

2323
def create_tensor(scope, name, np_data):

python/paddle/fluid/tests/unittests/test_clip_by_norm_op.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest
19+
from op import Operator
1920

2021
import paddle
21-
from paddle import fluid
2222
from paddle.fluid import core
2323
from paddle.nn import clip
2424

@@ -119,7 +119,7 @@ def check_with_place(self, place):
119119
out_selected_rows = scope.var('Out').get_selected_rows()
120120

121121
# run clip_by_norm_op
122-
clip_by_norm_op = fluid.op.Operator(
122+
clip_by_norm_op = Operator(
123123
"clip_by_norm", max_norm=self.max_norm, X='X', Out='Out'
124124
)
125125
clip_by_norm_op.run(scope, place)

python/paddle/fluid/tests/unittests/test_data_norm_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@
1717

1818
import numpy as np
1919
from eager_op_test import OpTest
20+
from op import Operator
2021

2122
import paddle
2223
from paddle import fluid
2324
from paddle.fluid import Program, core, program_guard
24-
from paddle.fluid.op import Operator
2525

2626

2727
def _reference_testing(x, batch_size, batch_sum, batch_square_sum, slot_dim=-1):

python/paddle/fluid/tests/unittests/test_fake_init_op.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,9 @@
1414

1515
import unittest
1616

17+
from op import Operator
18+
1719
from paddle.fluid import core
18-
from paddle.fluid.op import Operator
1920

2021

2122
class TestFakeInitOpSelectedRows(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_fill_constant_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest, convert_float_to_uint16
19+
from op import Operator
1920

2021
import paddle
2122
from paddle import fluid
2223
from paddle.fluid import Program, core, program_guard
23-
from paddle.fluid.op import Operator
2424

2525

2626
def fill_wrapper(shape, value=0.0):

python/paddle/fluid/tests/unittests/test_fill_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest, convert_float_to_uint16
19+
from op import Operator
1920

2021
from paddle.fluid import core
21-
from paddle.fluid.op import Operator
2222

2323

2424
class TestFillOp1(OpTest):

python/paddle/fluid/tests/unittests/test_ftrl_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest
19+
from op import Operator
1920

2021
from paddle.fluid import core
21-
from paddle.fluid.op import Operator
2222

2323

2424
def ftrl_step(param, grad, rows, sq_accum, lin_accum, lr, l1, l2, lr_power):

python/paddle/fluid/tests/unittests/test_get_tensor_from_selected_rows_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
import paddle
2021
from paddle.fluid import Program, core, program_guard
21-
from paddle.fluid.op import Operator
2222
from paddle.nn import clip
2323

2424

python/paddle/fluid/tests/unittests/test_lamb_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,10 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest
19+
from op import Operator
1920

2021
import paddle
2122
from paddle.fluid import core
22-
from paddle.fluid.op import Operator
2323

2424
paddle.enable_static()
2525

python/paddle/fluid/tests/unittests/test_lookup_table_bf16_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,11 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
import paddle
2021
from paddle import enable_static, fluid
2122
from paddle.fluid import core
22-
from paddle.fluid.op import Operator
2323
from paddle.fluid.tests.unittests.eager_op_test import (
2424
OpTest,
2525
convert_float_to_uint16,

python/paddle/fluid/tests/unittests/test_lookup_table_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,12 +21,12 @@
2121
paddle_static_guard,
2222
skip_check_grad_ci,
2323
)
24+
from op import Operator
2425

2526
import paddle
2627
import paddle.nn.functional as F
2728
from paddle import fluid
2829
from paddle.fluid import Program, core, program_guard
29-
from paddle.fluid.op import Operator
3030

3131

3232
class TestLookupTableOp(OpTest):

python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,11 @@
1616

1717
import numpy as np
1818
from eager_op_test import OpTest, skip_check_grad_ci
19+
from op import Operator
1920

2021
import paddle
2122
from paddle import fluid
2223
from paddle.fluid import Program, core, program_guard
23-
from paddle.fluid.op import Operator
2424

2525

2626
class TestStaticGraphSupportMultipleInt(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_merge_selectedrows_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,9 @@
1515
import unittest
1616

1717
import numpy as np
18+
from op import Operator
1819

1920
from paddle.fluid import core
20-
from paddle.fluid.op import Operator
2121

2222

2323
class TestMergeSelectedRows(unittest.TestCase):

python/paddle/fluid/tests/unittests/test_momentum_op.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@
1717
import numpy
1818
import numpy as np
1919
from eager_op_test import OpTest
20+
from op import Operator
2021

2122
import paddle
2223
from paddle import fluid
2324
from paddle.fluid import core
24-
from paddle.fluid.op import Operator
2525

2626

2727
def calculate_momentum_by_numpy(

0 commit comments

Comments
 (0)