
Commit 765c70a

Unittest improve, test=develop (PaddlePaddle#16941)
* accelerate test_ir_memory_optimize_nlp, test=develop
* accelerate test_ir_memory_optimize_nlp, test=develop
1 parent 23df084 commit 765c70a

1 file changed: +36 −40 lines

1 file changed

+36
-40
lines changed

python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py

Lines changed: 36 additions & 40 deletions
@@ -33,6 +33,13 @@
 
 
 class BuildIrMemOptBase(unittest.TestCase):
+    def setup_reader(self):
+        self.batch_size = 32
+        self.word_dict = paddle.dataset.imdb.word_dict()
+        self.train_reader = paddle.batch(
+            paddle.dataset.imdb.train(self.word_dict),
+            batch_size=self.batch_size)
+
     def check_network_convergence(self,
                                   network,
                                   use_cuda=True,
@@ -51,35 +58,34 @@ def check_network_convergence(self,
             return
         fluid.default_startup_program().random_seed = 100
         fluid.default_main_program().random_seed = 100
-        batch_size = 32
-        batch_size *= fluid.core.get_cuda_device_count() if use_cuda else int(
-            os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
-
-        # build network
-        word_dict = paddle.dataset.imdb.word_dict()
-        train_reader = paddle.batch(
-            paddle.dataset.imdb.train(word_dict), batch_size=batch_size)
 
         data = fluid.layers.data(
             name="words", shape=[1], dtype="int64", lod_level=1)
 
         label = fluid.layers.data(name="label", shape=[1], dtype="int64")
 
-        cost = network(data, label, len(word_dict))
+        cost = network(data, label, len(self.word_dict))
         optimizer = fluid.optimizer.Adam(learning_rate=0.001)
         optimizer.minimize(cost)
+        build_strategy = fluid.BuildStrategy()
+        build_strategy.enable_inplace = False
+        build_strategy.memory_optimize = False
         if memory_opt:
             fluid.memory_optimize(fluid.default_main_program())
+        else:
+            build_strategy.enable_inplace = use_ir_memory_optimize
+            build_strategy.memory_optimize = enable_inplace
 
         # execution
         place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
         feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
-        reader = feeder.decorate_reader(train_reader, multi_devices=True)
+        reader = feeder.decorate_reader(self.train_reader, multi_devices=True)
         exe = fluid.Executor(place)
         exe.run(fluid.default_startup_program())
 
         train_cp = compiler.CompiledProgram(fluid.default_main_program())
-        train_cp = train_cp.with_data_parallel(loss_name=cost.name)
+        train_cp = train_cp.with_data_parallel(
+            loss_name=cost.name, build_strategy=build_strategy)
         fetch_list = [cost.name]
 
         begin = time.time()
@@ -100,7 +106,7 @@ def check_network_convergence(self,
         end = time.time()
 
         print("%.4f Instance per second" % (
-            (batch_size * iter) / (end - begin)))
+            (self.batch_size * iter) / (end - begin)))
 
         print(first_loss, last_loss)
         avg_last_loss_val = np.array(last_loss).mean()
@@ -120,31 +126,21 @@ def test_network(self):
         if self.network is None or not core.is_compiled_with_cuda():
             return
 
-        baseline_first_loss, baseline_last_loss = None, None
-        for use_cuda in [True]:
-            for use_python_mem_opt in [True, False]:
-                print(
-                    'network: {}, use_cuda: {}, use_python_mem_opt: {}, use_ir_mem_opt : {}'.
-                    format(self.network.__name__, use_cuda, use_python_mem_opt,
-                           not use_python_mem_opt))
-                with fluid.program_guard(fluid.Program(), fluid.Program()):
-                    with fluid.scope_guard(core.Scope()):
-                        if use_cuda is True and use_python_mem_opt is True:
-                            baseline_first_loss, baseline_last_loss = self.check_network_convergence(
-                                self.network,
-                                use_cuda=use_cuda,
-                                memory_opt=use_python_mem_opt)
-                        else:
-                            cur_first_loss, cur_last_loss = self.check_network_convergence(
-                                self.network,
-                                use_cuda=use_cuda,
-                                memory_opt=use_python_mem_opt)
-
-                            self.assertAlmostEquals(
-                                np.mean(baseline_last_loss),
-                                np.mean(cur_last_loss),
-                                delta=1e-2)
-                            self.assertAlmostEquals(
-                                np.mean(baseline_first_loss),
-                                np.mean(cur_first_loss),
-                                delta=1e-2)
+        self.setup_reader()
+
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            with fluid.scope_guard(core.Scope()):
+                baseline_first_loss, baseline_last_loss = self.check_network_convergence(
+                    self.network)
+
+                cur_first_loss, cur_last_loss = self.check_network_convergence(
+                    self.network, memory_opt=False)
+
+                self.assertAlmostEquals(
+                    np.mean(baseline_last_loss),
+                    np.mean(cur_last_loss),
+                    delta=1e-6)
+                self.assertAlmostEquals(
+                    np.mean(baseline_first_loss),
+                    np.mean(cur_first_loss),
+                    delta=1e-6)
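For orientation, here is a minimal usage sketch of how a concrete test might drive the refactored base class: a subclass sets self.network in setUp(), and test_network() then calls setup_reader() once and reuses self.word_dict and self.train_reader for both the baseline run and the memory_opt=False run. The bow_net network and the TestBowNetIrMemOpt class below are illustrative assumptions (not part of this commit), modeled only on the signature implied by cost = network(data, label, len(self.word_dict)).

# Hypothetical sketch, not part of this commit: a bow-style network with the
# signature check_network_convergence expects -- network(data, label, dict_dim).
import unittest

import paddle.fluid as fluid

from ir_memory_optimize_net_base import BuildIrMemOptBase


def bow_net(data, label, dict_dim, emb_dim=128, hid_dim=128, class_dim=2):
    # embedding -> sum sequence pooling -> hidden fc -> softmax fc -> mean cross-entropy
    emb = fluid.layers.embedding(
        input=data, size=[dict_dim, emb_dim], is_sparse=True)
    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
    hidden = fluid.layers.fc(input=bow, size=hid_dim, act='tanh')
    prediction = fluid.layers.fc(input=hidden, size=class_dim, act='softmax')
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    return fluid.layers.mean(cost)


class TestBowNetIrMemOpt(BuildIrMemOptBase):
    def setUp(self):
        # test_network() skips itself when self.network is None or CUDA is unavailable.
        self.network = bow_net


if __name__ == '__main__':
    unittest.main()

Because both convergence runs in test_network() now share the single reader built by setup_reader(), the IMDB word dict and batch reader are constructed once per test rather than once per configuration, which appears to be the source of the speed-up this commit targets.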
