【Fix PIR Unittest No.482 BUAA】Fix some test cases in PIR #66209

Merged: 2 commits, Jul 22, 2024
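This PR makes two changes, both visible in the diff below. First, `test_momentum_op` is removed from the STATIC_BUILD_TESTS list in `test/deprecated/legacy_test/CMakeLists.txt` and added to the corresponding list in `test/legacy_test/CMakeLists.txt`. Second, three momentum/regularizer test cases (`test_param_has_l2decay`, `test_param_has_l1decay`, `test_param_has_no_regularizer`) are wrapped in `paddle.pir_utils.OldIrGuard()` so that they keep building and inspecting a legacy-IR program even when PIR is the default.

A minimal sketch of the pattern being applied is shown next. The guard and the attribute checks come from the diff; the small network, the Momentum hyperparameters, and the use of `paddle.nn.Linear` are illustrative assumptions standing in for the test's real `get_program` helper, whose body lies outside this hunk.

```python
import paddle

# Hedged sketch of the pattern this PR applies. The network, the Momentum
# hyperparameters, and paddle.nn.Linear are illustrative stand-ins for the
# test's real get_program helper (its body is outside this diff hunk).
paddle.enable_static()

with paddle.pir_utils.OldIrGuard():
    # Inside the guard the program is built with the legacy IR, so the
    # old-IR checks on block ops and attributes below still apply.
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32')
        weight_attr = paddle.ParamAttr(
            name="weight",
            initializer=paddle.nn.initializer.Constant(value=0.5),
            regularizer=paddle.regularizer.L2Decay(0.1),
        )
        linear = paddle.nn.Linear(10, 10, weight_attr=weight_attr)
        loss = paddle.mean(linear(x))
        opt = paddle.optimizer.Momentum(learning_rate=0.1, momentum=0.9)
        opt.minimize(loss)

    # Expected in this sketch: the momentum op updating "weight" reports
    # 'l2_decay' (the fused per-parameter L2Decay), while the bias op,
    # which has no regularizer, reports an empty method.
    for op in main.global_block().ops:
        if op.type == 'momentum':
            print(op.type, op.attr('regularization_method'))
```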
test/deprecated/legacy_test/CMakeLists.txt (1 change: 0 additions, 1 deletion)
@@ -716,7 +716,6 @@ set(STATIC_BUILD_TESTS
test_fuse_bn_act_pass_deprecated
test_layer_norm_op_deprecated
test_lookup_table_v2_op_deprecated
test_momentum_op
test_momentum_op_deprecated
test_nce_deprecated
test_sparse_conv_op
test/legacy_test/CMakeLists.txt (1 change: 1 addition, 0 deletions)
@@ -1058,6 +1058,7 @@ set(STATIC_BUILD_TESTS
test_layer_norm_op
test_eigh_op
test_matmul_v2_op
test_momentum_op
test_paddle_save_load_binary
test_assign_pos_op
test_bucketize_api
@@ -760,59 +760,68 @@ def get_program(self, weight_attr, bias_attr=False):

def test_param_has_l2decay(self):
paddle.enable_static()
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L2Decay(0.1),
)
program = self.get_program(weight_attr, bias_attr=False)
ops = program.global_block().ops
with paddle.pir_utils.OldIrGuard():
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L2Decay(0.1),
)
program = self.get_program(weight_attr, bias_attr=False)
ops = program.global_block().ops

self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.1))
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)
self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(
ops[-1].attr('regularization_coeff'), np.float32(0.1)
)
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)

def test_param_has_l1decay(self):
paddle.enable_static()
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L1Decay(0.1),
)
bias_attr = paddle.ParamAttr(
name="bias",
initializer=paddle.nn.initializer.Constant(value=0.0),
regularizer=None,
)
program = self.get_program(weight_attr, bias_attr)
ops = program.global_block().ops

self.assertEqual(ops[-1].type, 'momentum')
self.assertEqual(ops[-2].type, 'momentum')
self.assertEqual(ops[-3].type, 'sum')
self.assertEqual(ops[-4].type, 'scale')
self.assertEqual(ops[-5].type, 'sign')
self.assertEqual(ops[-6].type, 'matmul_v2_grad')
if 'weight' in ops[-1].input('Param'):
self.assertEqual(ops[-1].attr('regularization_method'), '')
self.assertEqual(ops[-1].attr('regularization_coeff'), 0)
if 'bias' in ops[-2].input('Param'):
self.assertEqual(ops[-2].attr('regularization_method'), 'l2_decay')
self.assertEqual(
ops[-2].attr('regularization_coeff'), np.float32(0.5)
with paddle.pir_utils.OldIrGuard():
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L1Decay(0.1),
)
bias_attr = paddle.ParamAttr(
name="bias",
initializer=paddle.nn.initializer.Constant(value=0.0),
regularizer=None,
)
program = self.get_program(weight_attr, bias_attr)
ops = program.global_block().ops

self.assertEqual(ops[-1].type, 'momentum')
self.assertEqual(ops[-2].type, 'momentum')
self.assertEqual(ops[-3].type, 'sum')
self.assertEqual(ops[-4].type, 'scale')
self.assertEqual(ops[-5].type, 'sign')
self.assertEqual(ops[-6].type, 'matmul_v2_grad')
if 'weight' in ops[-1].input('Param'):
self.assertEqual(ops[-1].attr('regularization_method'), '')
self.assertEqual(ops[-1].attr('regularization_coeff'), 0)
if 'bias' in ops[-2].input('Param'):
self.assertEqual(
ops[-2].attr('regularization_method'), 'l2_decay'
)
self.assertEqual(
ops[-2].attr('regularization_coeff'), np.float32(0.5)
)

def test_param_has_no_regularizer(self):
paddle.enable_static()
program = self.get_program(weight_attr=None)
ops = program.global_block().ops
self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.5))
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)
with paddle.pir_utils.OldIrGuard():
program = self.get_program(weight_attr=None)
ops = program.global_block().ops
self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(
ops[-1].attr('regularization_coeff'), np.float32(0.5)
)
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)


class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase):
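Why the guard is needed: the three updated tests assert on legacy-Program details such as `op.attr('regularization_method')` and the presence or absence of `sign`/`scale`/`sum` ops, which only exist in that form under the old IR. The sketch below illustrates the difference the guard controls for; it assumes a build in which the PIR API is the default for static graphs, which is the situation these fixes target.

```python
import paddle

paddle.enable_static()

# Assumption: PIR is the default static-graph IR in this environment,
# which is the situation the wrapped tests have to cope with.
default_prog = paddle.static.default_main_program()
print(type(default_prog))  # expected: a PIR program object

with paddle.pir_utils.OldIrGuard():
    # Inside the guard the same API hands back the legacy Program, so
    # checks like op.attr('regularization_method') keep their old meaning.
    legacy_prog = paddle.static.default_main_program()
    print(type(legacy_prog))  # expected: the legacy framework Program
```

With the guard in place, the existing attribute-level and op-level assertions continue to run against the legacy program representation without further changes.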