Skip to content

Commit cf5d271

Browse files
authored
Fix examples of fluid.layers.sums and fluid.layers.DynamicRNN (#17308)
* Fix examples of fluid.layers.sums. test=document_preview * Correct the example of DynamicRNN and its functions. test=develop * Add 'import paddle.fluid as fluid' to examples. test=develop * Update API.spec. test=develop * Add space lines. test=develop * Update the API.spec. test=develop
1 parent 2281ebf commit cf5d271

File tree

3 files changed

+104
-70
lines changed

3 files changed

+104
-70
lines changed

paddle/fluid/API.spec

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -257,7 +257,7 @@ paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype',
257257
paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '992eb42590fc1c380841a6db72ce78b3'))
258258
paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'b12717d3d4567e6119589f7f655b0cbb'))
259259
paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'f9e905b48123914c78055a45fe23106a'))
260-
paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '42912092418620b4be07f36af31e7816'))
260+
paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '5df743d578638cd2bbb9369499b44af4'))
261261
paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b690184f3537df5501e4d9d8f31152a5'))
262262
paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', 'd4059a2f5763036b07018d76429f9acb'))
263263
paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '1d8b14729639fa38509c79b9784740fa'))
@@ -297,10 +297,10 @@ paddle.fluid.layers.IfElse.output (ArgSpec(args=['self'], varargs='outs', keywor
297297
paddle.fluid.layers.IfElse.true_block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
298298
paddle.fluid.layers.DynamicRNN.__init__ (ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
299299
paddle.fluid.layers.DynamicRNN.block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6d3e0a5d9aa519a9773a36e1620ea9b7'))
300-
paddle.fluid.layers.DynamicRNN.memory (ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32')), ('document', 'b9174d4e91505b0c8ecc193eb51e248d'))
300+
paddle.fluid.layers.DynamicRNN.memory (ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32')), ('document', '57cdd0a63747f4c670cdb9d250ceb7e1'))
301301
paddle.fluid.layers.DynamicRNN.output (ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None), ('document', 'b439a176a3328de8a75bdc5c08eece4a'))
302-
paddle.fluid.layers.DynamicRNN.static_input (ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', 'f29ad2478b6b2ad4f413d2936a331ea0'))
303-
paddle.fluid.layers.DynamicRNN.step_input (ArgSpec(args=['self', 'x', 'level'], varargs=None, keywords=None, defaults=(0,)), ('document', '7568c5ac7622a10288d3307a94134655'))
302+
paddle.fluid.layers.DynamicRNN.static_input (ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None), ('document', '55ab9c562edd7dabec0bd6fd6c1a28cc'))
303+
paddle.fluid.layers.DynamicRNN.step_input (ArgSpec(args=['self', 'x', 'level'], varargs=None, keywords=None, defaults=(0,)), ('document', '4b300851b5201891d0e11c406e4c7d07'))
304304
paddle.fluid.layers.DynamicRNN.update_memory (ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None), ('document', '5d83987da13b98363d6a807a52d8024f'))
305305
paddle.fluid.layers.StaticRNN.__init__ (ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
306306
paddle.fluid.layers.StaticRNN.memory (ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1)), ('document', 'f1b60dc4194d0bb714d6c6f5921b227f'))

python/paddle/fluid/layers/control_flow.py

Lines changed: 83 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -1645,23 +1645,7 @@ class DynamicRNN(object):
16451645
sample sequence can be different. This API automatically processes them in
16461646
batch.
16471647
1648-
The input lod must be set. Please reference `lod_tensor`
1649-
1650-
>>> import paddle.fluid as fluid
1651-
>>> data = fluid.layers.data(name='sentence', dtype='int64', lod_level=1)
1652-
>>> embedding = fluid.layers.embedding(input=data, size=[65535, 32],
1653-
>>> is_sparse=True)
1654-
>>>
1655-
>>> drnn = fluid.layers.DynamicRNN()
1656-
>>> with drnn.block():
1657-
>>> word = drnn.step_input(embedding)
1658-
>>> prev = drnn.memory(shape=[200])
1659-
>>> hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
1660-
>>> drnn.update_memory(prev, hidden) # set prev to hidden
1661-
>>> drnn.output(hidden)
1662-
>>>
1663-
>>> # last is the last time step of rnn. It is the encoding result.
1664-
>>> last = fluid.layers.sequence_last_step(drnn())
1648+
The input lod must be set. Please refer to `lod_tensor`.
16651649
16661650
The dynamic RNN will unfold sequence into timesteps. Users need to define
16671651
how to process each time step during the :code:`with` block.
@@ -1671,10 +1655,30 @@ class DynamicRNN(object):
16711655
16721656
The dynamic RNN can mark multiple variables as its output. Use `drnn()` to
16731657
get the output sequence.
1674-
1658+
16751659
NOTES:
16761660
Currently it is not supported that setting is_sparse to True of any
16771661
layers within DynamicRNN.
1662+
1663+
Examples:
1664+
.. code-block:: python
1665+
1666+
import paddle.fluid as fluid
1667+
1668+
sentence = fluid.layers.data(name='sentence', shape=[1], dtype='int64', lod_level=1)
1669+
embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True)
1670+
1671+
drnn = fluid.layers.DynamicRNN()
1672+
with drnn.block():
1673+
word = drnn.step_input(embedding)
1674+
prev = drnn.memory(shape=[200])
1675+
hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
1676+
drnn.update_memory(prev, hidden) # set prev to hidden
1677+
drnn.output(hidden)
1678+
1679+
# Get the last time step of rnn. It is the encoding result.
1680+
rnn_output = drnn()
1681+
last = fluid.layers.sequence_last_step(rnn_output)
16781682
"""
16791683
BEFORE_RNN = 0
16801684
IN_RNN = 1
@@ -1701,8 +1705,8 @@ def step_input(self, x, level=0):
17011705
Mark a sequence as a dynamic RNN input.
17021706
17031707
Args:
1704-
x(Variable): The input sequence.
1705-
level(int): The level of lod used to split steps. Default: 0.
1708+
x (Variable): The input sequence which should have lod information.
1709+
level (int): The level of lod used to split steps. Default: 0.
17061710
17071711
Returns:
17081712
The current timestep in the input sequence.
@@ -1753,13 +1757,37 @@ def step_input(self, x, level=0):
17531757
def static_input(self, x):
17541758
"""
17551759
Mark a variable as a RNN input. The input will not be scattered into
1756-
time steps.
1760+
time steps. It is optional.
17571761
17581762
Args:
1759-
x(Variable): The input variable.
1763+
x (Variable): The input variable.
17601764
17611765
Returns:
17621766
The input variable that can be accessed in RNN.
1767+
1768+
Examples:
1769+
.. code-block:: python
1770+
1771+
import paddle.fluid as fluid
1772+
1773+
sentence = fluid.layers.data(name='sentence', dtype='float32', shape=[32], lod_level=1)
1774+
encoder_proj = fluid.layers.data(name='encoder_proj', dtype='float32', shape=[32], lod_level=1)
1775+
decoder_boot = fluid.layers.data(name='boot', dtype='float32', shape=[10], lod_level=1)
1776+
1777+
drnn = fluid.layers.DynamicRNN()
1778+
with drnn.block():
1779+
current_word = drnn.step_input(sentence)
1780+
encoder_word = drnn.static_input(encoder_proj)
1781+
hidden_mem = drnn.memory(init=decoder_boot, need_reorder=True)
1782+
fc_1 = fluid.layers.fc(input=encoder_word, size=30, bias_attr=False)
1783+
fc_2 = fluid.layers.fc(input=current_word, size=30, bias_attr=False)
1784+
decoder_inputs = fc_1 + fc_2
1785+
h, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=hidden_mem, size=30)
1786+
drnn.update_memory(hidden_mem, h)
1787+
out = fluid.layers.fc(input=h, size=10, bias_attr=True, act='softmax')
1788+
drnn.output(out)
1789+
1790+
rnn_output = drnn()
17631791
"""
17641792
self._assert_in_rnn_block_("static_input")
17651793
if not isinstance(x, Variable):
@@ -1836,54 +1864,51 @@ def memory(self,
18361864
the input variable. It should be set to true when the initialized memory
18371865
depends on the input sample.
18381866
1839-
For example,
1840-
1841-
>>> import paddle.fluid as fluid
1842-
>>> sentence = fluid.layers.data(
1843-
>>> name='sentence', dtype='float32', shape=[32])
1844-
>>> boot_memory = fluid.layers.data(
1845-
>>> name='boot', dtype='float32', shape=[10])
1846-
>>>
1847-
>>> drnn = fluid.layers.DynamicRNN()
1848-
>>> with drnn.block():
1849-
>>> word = drnn.step_input(sentence)
1850-
>>> memory = drnn.memory(init=boot_memory, need_reorder=True)
1851-
>>> hidden = fluid.layers.fc(
1852-
>>> input=[word, memory], size=10, act='tanh')
1853-
>>> drnn.update_memory(ex_mem=memory, new_mem=hidden)
1854-
>>> drnn.output(hidden)
1855-
>>> rnn_output = drnn()
1867+
Examples:
1868+
.. code-block:: python
1869+
1870+
import paddle.fluid as fluid
1871+
1872+
sentence = fluid.layers.data(name='sentence', shape=[32], dtype='float32', lod_level=1)
1873+
boot_memory = fluid.layers.data(name='boot', shape=[10], dtype='float32', lod_level=1)
1874+
1875+
drnn = fluid.layers.DynamicRNN()
1876+
with drnn.block():
1877+
word = drnn.step_input(sentence)
1878+
memory = drnn.memory(init=boot_memory, need_reorder=True)
1879+
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
1880+
drnn.update_memory(ex_mem=memory, new_mem=hidden)
1881+
drnn.output(hidden)
1882+
1883+
rnn_output = drnn()
18561884
18571885
18581886
Otherwise, if :code:`shape`, :code:`value`, :code:`dtype` are set, the
18591887
:code:`memory` will be initialized by this :code:`value`.
18601888
1861-
For example,
1889+
Examples:
1890+
.. code-block:: python
18621891
1863-
>>> import paddle.fluid as fluid
1864-
>>> sentence = fluid.layers.data(
1865-
>>> name='sentence', dtype='float32', shape=[32])
1866-
>>>
1867-
>>> drnn = fluid.layers.DynamicRNN()
1868-
>>> with drnn.block():
1869-
>>> word = drnn.step_input(sentence)
1870-
>>> memory = drnn.memory(shape=[10], dtype='float32', value=0)
1871-
>>> hidden = fluid.layers.fc(
1872-
>>> input=[word, memory], size=10, act='tanh')
1873-
>>> drnn.update_memory(ex_mem=memory, new_mem=hidden)
1874-
>>> drnn.output(hidden)
1875-
>>> rnn_output = drnn()
1892+
import paddle.fluid as fluid
18761893
1894+
sentence = fluid.layers.data(name='sentence', dtype='float32', shape=[32], lod_level=1)
1895+
1896+
drnn = fluid.layers.DynamicRNN()
1897+
with drnn.block():
1898+
word = drnn.step_input(sentence)
1899+
memory = drnn.memory(shape=[10], dtype='float32', value=0)
1900+
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
1901+
drnn.update_memory(ex_mem=memory, new_mem=hidden)
1902+
drnn.output(hidden)
18771903
1878-
Args:
1879-
init(Variable|None): The initialized variable.
1904+
rnn_output = drnn()
18801905
1881-
shape(list|tuple): The memory shape. NOTE the shape does not contain batch_size.
18821906
1907+
Args:
1908+
init(Variable|None): The initialized variable.
1909+
shape(list|tuple): The memory shape. The shape does not contain batch_size.
18831910
value(float): the initialized value.
1884-
18851911
need_reorder(bool): True if the initialized memory depends on the input sample.
1886-
18871912
dtype(str|numpy.dtype): The data type of the initialized memory.
18881913
18891914
Returns:

python/paddle/fluid/layers/tensor.py

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -275,14 +275,23 @@ def sums(input, out=None):
275275
Examples:
276276
.. code-block:: python
277277
278-
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
279-
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
280-
a0 = layers.array_read(array=tmp, i=i)
281-
i = layers.increment(x=i)
282-
a1 = layers.array_read(array=tmp, i=i)
283-
mean_a0 = layers.mean(a0)
284-
mean_a1 = layers.mean(a1)
285-
a_sum = layers.sums(input=[mean_a0, mean_a1])
278+
import paddle.fluid as fluid
279+
280+
# sum of several tensors
281+
a0 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
282+
a1 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
283+
a2 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=3)
284+
sums = fluid.layers.sums(input=[a0, a1, a2])
285+
286+
# sum of a tensor array
287+
array = fluid.layers.create_array('int64')
288+
i = fluid.layers.zeros(shape=[1], dtype='int64', force_cpu=True)
289+
fluid.layers.array_write(a0, array=array, i=i)
290+
i = fluid.layers.increment(x=i)
291+
fluid.layers.array_write(a1, array=array, i=i)
292+
i = fluid.layers.increment(x=i)
293+
fluid.layers.array_write(a2, array=array, i=i)
294+
sums = fluid.layers.sums(input=array)
286295
"""
287296
helper = LayerHelper('sum', **locals())
288297
if out is None:

0 commit comments

Comments
 (0)