Skip to content

Commit 2280f18

Browse files
authored
BuildStrategy api comment (#17348)
Python examples of fluid.layers.io.double_buffer and some of BuildStrategy's methods.
1 parent 5b2a3c4 commit 2280f18

File tree

3 files changed

+90
-30
lines changed

3 files changed

+90
-30
lines changed

paddle/fluid/API.spec

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@ paddle.fluid.layers.open_files (ArgSpec(args=['filenames', 'shapes', 'lod_levels
242242
paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '32181f6037e387fb6e68a5beaafe33b6'))
243243
paddle.fluid.layers.shuffle (ArgSpec(args=['reader', 'buffer_size'], varargs=None, keywords=None, defaults=None), ('document', 'f967a73426db26f970bc70bfb03cffca'))
244244
paddle.fluid.layers.batch (ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', 'fcb24383c6eef2ca040ee824c26e22fd'))
245-
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '07e5b796674796eb1ef3fee9c10d24e3'))
245+
paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'c13b8a8521bea5f8123b925ae2a5d5db'))
246246
paddle.fluid.layers.random_data_generator (ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)), ('document', '9b7f0f86ec24bbc97643cadcb6499cff'))
247247
paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '5c54493d96c7e0760dc6758af1c8dd72'))
248248
paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', 'b42332b894e1e0962c6a43f0151c2640'))

paddle/fluid/pybind/pybind.cc

Lines changed: 87 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1334,14 +1334,9 @@ All parameter, weight, gradient are variables in Paddle.
13341334
Examples:
13351335
.. code-block:: python
13361336
1337-
build_strategy = fluid.BuildStrategy()
1338-
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
1339-
1340-
train_exe = fluid.ParallelExecutor(use_cuda=True,
1341-
loss_name=loss.name,
1342-
build_strategy=build_strategy)
1343-
1344-
train_loss, = train_exe.run([loss.name], feed=feed_dict)
1337+
import paddle.fluid as fluid
1338+
build_strategy = fluid.BuildStrategy()
1339+
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
13451340
)DOC");
13461341

13471342
py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
@@ -1363,11 +1358,19 @@ All parameter, weight, gradient are variables in Paddle.
13631358
self.reduce_ = strategy;
13641359
},
13651360
R"DOC(The type is STR, there are two reduce strategies in ParallelExecutor,
1366-
'AllReduce' and 'Reduce'. If you want that all the parameters'
1367-
optimization are done on all devices independently, you should choose 'AllReduce';
1368-
if you choose 'Reduce', all the parameters' optimization will be evenly distributed
1369-
to different devices, and then broadcast the optimized parameter to other devices.
1370-
In some models, `Reduce` is faster. Default 'AllReduce'. )DOC")
1361+
'AllReduce' and 'Reduce'. If you want that all the parameters'
1362+
optimization are done on all devices independently, you should choose 'AllReduce';
1363+
if you choose 'Reduce', all the parameters' optimization will be evenly distributed
1364+
to different devices, and then broadcast the optimized parameter to other devices.
1365+
In some models, `Reduce` is faster. Default 'AllReduce'.
1366+
1367+
Examples:
1368+
.. code-block:: python
1369+
1370+
import paddle.fluid as fluid
1371+
build_strategy = fluid.BuildStrategy()
1372+
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
1373+
)DOC")
13711374
.def_property(
13721375
"gradient_scale_strategy",
13731376
[](const BuildStrategy &self) { return self.gradient_scale_; },
@@ -1377,10 +1380,18 @@ All parameter, weight, gradient are variables in Paddle.
13771380
self.gradient_scale_ = strategy;
13781381
},
13791382
R"DOC(The type is STR, there are three ways of defining :math:`loss@grad` in
1380-
ParallelExecutor, 'CoeffNumDevice', 'One' and 'Customized'. By default,
1381-
ParallelExecutor sets the :math:`loss@grad` according to the number of devices.
1382-
If you want to customize :math:`loss@grad`, you can choose 'Customized'.
1383-
Default 'CoeffNumDevice'.)DOC")
1383+
ParallelExecutor, 'CoeffNumDevice', 'One' and 'Customized'. By default,
1384+
ParallelExecutor sets the :math:`loss@grad` according to the number of devices.
1385+
If you want to customize :math:`loss@grad`, you can choose 'Customized'.
1386+
Default 'CoeffNumDevice'.
1387+
1388+
Examples:
1389+
.. code-block:: python
1390+
1391+
import paddle.fluid as fluid
1392+
build_strategy = fluid.BuildStrategy()
1393+
build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized
1394+
)DOC")
13841395
.def_property(
13851396
"debug_graphviz_path",
13861397
[](const BuildStrategy &self) { return self.debug_graphviz_path_; },
@@ -1389,8 +1400,16 @@ All parameter, weight, gradient are variables in Paddle.
13891400
self.debug_graphviz_path_ = path;
13901401
},
13911402
R"DOC(The type is STR, debug_graphviz_path indicate the path that
1392-
writing the SSA Graph to file in the form of graphviz, you.
1393-
It is useful for debugging. Default "")DOC")
1403+
writing the SSA Graph to file in the form of graphviz.
1404+
It is useful for debugging. Default ""
1405+
1406+
Examples:
1407+
.. code-block:: python
1408+
1409+
import paddle.fluid as fluid
1410+
build_strategy = fluid.BuildStrategy()
1411+
build_strategy.debug_graphviz_path = "./graph"
1412+
)DOC")
13941413
.def_property(
13951414
"enable_sequential_execution",
13961415
[](const BuildStrategy &self) {
@@ -1400,7 +1419,15 @@ All parameter, weight, gradient are variables in Paddle.
14001419
PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
14011420
self.enable_sequential_execution_ = b;
14021421
},
1403-
R"DOC(The type is BOOL. If set True, the execution order of ops would be the same as what is in the program. Default False.)DOC")
1422+
R"DOC(The type is BOOL. If set True, the execution order of ops would be the same as what is in the program. Default False.
1423+
1424+
Examples:
1425+
.. code-block:: python
1426+
1427+
import paddle.fluid as fluid
1428+
build_strategy = fluid.BuildStrategy()
1429+
build_strategy.enable_sequential_execution = True
1430+
)DOC")
14041431
.def_property(
14051432
"remove_unnecessary_lock",
14061433
[](const BuildStrategy &self) {
@@ -1410,7 +1437,15 @@ All parameter, weight, gradient are variables in Paddle.
14101437
PADDLE_ENFORCE(!self.IsFinalized(), "BuildStrategy is finalized.");
14111438
self.remove_unnecessary_lock_ = b;
14121439
},
1413-
R"DOC(The type is BOOL. If set True, some locks in GPU ops would be released and ParallelExecutor would run faster. Default True.)DOC")
1440+
R"DOC(The type is BOOL. If set True, some locks in GPU ops would be released and ParallelExecutor would run faster. Default True.
1441+
1442+
Examples:
1443+
.. code-block:: python
1444+
1445+
import paddle.fluid as fluid
1446+
build_strategy = fluid.BuildStrategy()
1447+
build_strategy.remove_unnecessary_lock = True
1448+
)DOC")
14141449
.def_property(
14151450
"num_trainers",
14161451
[](const BuildStrategy &self) { return self.num_trainers_; },
@@ -1439,8 +1474,16 @@ All parameter, weight, gradient are variables in Paddle.
14391474
self.fuse_elewise_add_act_ops_ = b;
14401475
},
14411476
R"DOC(The type is BOOL, fuse_elewise_add_act_ops indicate whether
1442-
to fuse elementwise_add_op and activation_op,
1443-
it may make the execution faster. Default False)DOC")
1477+
to fuse elementwise_add_op and activation_op,
1478+
it may make the execution faster. Default False
1479+
1480+
Examples:
1481+
.. code-block:: python
1482+
1483+
import paddle.fluid as fluid
1484+
build_strategy = fluid.BuildStrategy()
1485+
build_strategy.fuse_elewise_add_act_ops = True
1486+
)DOC")
14441487
.def_property(
14451488
"fuse_relu_depthwise_conv",
14461489
[](const BuildStrategy &self) {
@@ -1451,10 +1494,18 @@ All parameter, weight, gradient are variables in Paddle.
14511494
self.fuse_relu_depthwise_conv_ = b;
14521495
},
14531496
R"DOC(The type is BOOL, fuse_relu_depthwise_conv indicate whether
1454-
to fuse relu and depthwise_conv2d,
1455-
it will save GPU memory and may make the execution faster.
1456-
This options is only available in GPU devices.
1457-
Default False.)DOC")
1497+
to fuse relu and depthwise_conv2d,
1498+
it will save GPU memory and may make the execution faster.
1499+
This options is only available in GPU devices.
1500+
Default False.
1501+
1502+
Examples:
1503+
.. code-block:: python
1504+
1505+
import paddle.fluid as fluid
1506+
build_strategy = fluid.BuildStrategy()
1507+
build_strategy.fuse_relu_depthwise_conv = True
1508+
)DOC")
14581509
.def_property(
14591510
"fuse_broadcast_ops",
14601511
[](const BuildStrategy &self) { return self.fuse_broadcast_ops_; },
@@ -1491,7 +1542,15 @@ All parameter, weight, gradient are variables in Paddle.
14911542
Current implementation doesn't support FP16 training and CPU.
14921543
And only synchronous on one machine, not all machines.
14931544
1494-
Default False)DOC")
1545+
Default False
1546+
1547+
Examples:
1548+
.. code-block:: python
1549+
1550+
import paddle.fluid as fluid
1551+
build_strategy = fluid.BuildStrategy()
1552+
build_strategy.sync_batch_norm = True
1553+
)DOC")
14951554
.def_property(
14961555
"memory_optimize",
14971556
[](const BuildStrategy &self) { return self.memory_optimize_; },

python/paddle/fluid/layers/io.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1055,7 +1055,8 @@ def double_buffer(reader, place=None, name=None):
10551055
10561056
Examples:
10571057
1058-
>>> reader = fluid.layers.open_files(filenames=['somefile'],
1058+
>>> import paddle.fluid as fluid
1059+
>>> reader = fluid.layers.open_files(filenames=['mnist.recordio'],
10591060
>>> shapes=[[-1, 784], [-1, 1]],
10601061
>>> dtypes=['float32', 'int64'])
10611062
>>> reader = fluid.layers.double_buffer(reader)

0 commit comments

Comments
 (0)