@@ -1645,23 +1645,7 @@ class DynamicRNN(object):
1645
1645
sample sequence can be different. This API automatically process them in
1646
1646
batch.
1647
1647
1648
- The input lod must be set. Please reference `lod_tensor`
1649
-
1650
- >>> import paddle.fluid as fluid
1651
- >>> data = fluid.layers.data(name='sentence', dtype='int64', lod_level=1)
1652
- >>> embedding = fluid.layers.embedding(input=data, size=[65535, 32],
1653
- >>> is_sparse=True)
1654
- >>>
1655
- >>> drnn = fluid.layers.DynamicRNN()
1656
- >>> with drnn.block():
1657
- >>> word = drnn.step_input(embedding)
1658
- >>> prev = drnn.memory(shape=[200])
1659
- >>> hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
1660
- >>> drnn.update_memory(prev, hidden) # set prev to hidden
1661
- >>> drnn.output(hidden)
1662
- >>>
1663
- >>> # last is the last time step of rnn. It is the encoding result.
1664
- >>> last = fluid.layers.sequence_last_step(drnn())
1648
+ The input lod must be set. Please refer to `lod_tensor`.
1665
1649
1666
1650
The dynamic RNN will unfold sequence into timesteps. Users need to define
1667
1651
how to process each time step during the :code:`with` block.
@@ -1671,10 +1655,30 @@ class DynamicRNN(object):
1671
1655
1672
1656
The dynamic RNN can mark multiple variables as its output. Use `drnn()` to
1673
1657
get the output sequence.
1674
-
1658
+
1675
1659
NOTES:
1676
1660
Currently it is not supported that setting is_sparse to True of any
1677
1661
layers within DynamicRNN.
1662
+
1663
+ Examples:
1664
+ .. code-block:: python
1665
+
1666
+ import paddle.fluid as fluid
1667
+
1668
+ sentence = fluid.layers.data(name='sentence', shape=[1], dtype='int64', lod_level=1)
1669
+ embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True)
1670
+
1671
+ drnn = fluid.layers.DynamicRNN()
1672
+ with drnn.block():
1673
+ word = drnn.step_input(embedding)
1674
+ prev = drnn.memory(shape=[200])
1675
+ hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
1676
+ drnn.update_memory(prev, hidden) # set prev to hidden
1677
+ drnn.output(hidden)
1678
+
1679
+ # Get the last time step of rnn. It is the encoding result.
1680
+ rnn_output = drnn()
1681
+ last = fluid.layers.sequence_last_step(rnn_output)
1678
1682
"""
1679
1683
BEFORE_RNN = 0
1680
1684
IN_RNN = 1
@@ -1701,8 +1705,8 @@ def step_input(self, x, level=0):
1701
1705
Mark a sequence as a dynamic RNN input.
1702
1706
1703
1707
Args:
1704
- x(Variable): The input sequence.
1705
- level(int): The level of lod used to split steps. Default: 0.
1708
+ x (Variable): The input sequence, which should have lod information.
1709
+ level (int): The level of lod used to split steps. Default: 0.
1706
1710
1707
1711
Returns:
1708
1712
The current timestep in the input sequence.
@@ -1753,13 +1757,37 @@ def step_input(self, x, level=0):
1753
1757
def static_input(self, x):
1754
1758
"""
1755
1759
Mark a variable as a RNN input. The input will not be scattered into
1756
- time steps.
1760
+ time steps. It is optional.
1757
1761
1758
1762
Args:
1759
- x(Variable): The input variable.
1763
+ x (Variable): The input variable.
1760
1764
1761
1765
Returns:
1762
1766
The input variable that can access in RNN.
1767
+
1768
+ Examples:
1769
+ .. code-block:: python
1770
+
1771
+ import paddle.fluid as fluid
1772
+
1773
+ sentence = fluid.layers.data(name='sentence', dtype='float32', shape=[32], lod_level=1)
1774
+ encoder_proj = fluid.layers.data(name='encoder_proj', dtype='float32', shape=[32], lod_level=1)
1775
+ decoder_boot = fluid.layers.data(name='boot', dtype='float32', shape=[10], lod_level=1)
1776
+
1777
+ drnn = fluid.layers.DynamicRNN()
1778
+ with drnn.block():
1779
+ current_word = drnn.step_input(sentence)
1780
+ encoder_word = drnn.static_input(encoder_proj)
1781
+ hidden_mem = drnn.memory(init=decoder_boot, need_reorder=True)
1782
+ fc_1 = fluid.layers.fc(input=encoder_word, size=30, bias_attr=False)
1783
+ fc_2 = fluid.layers.fc(input=current_word, size=30, bias_attr=False)
1784
+ decoder_inputs = fc_1 + fc_2
1785
+ h, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=hidden_mem, size=30)
1786
+ drnn.update_memory(hidden_mem, h)
1787
+ out = fluid.layers.fc(input=h, size=10, bias_attr=True, act='softmax')
1788
+ drnn.output(out)
1789
+
1790
+ rnn_output = drnn()
1763
1791
"""
1764
1792
self._assert_in_rnn_block_("static_input")
1765
1793
if not isinstance(x, Variable):
@@ -1836,54 +1864,51 @@ def memory(self,
1836
1864
the input variable. It should be set to true when the initialized memory
1837
1865
depends on the input sample.
1838
1866
1839
- For example,
1840
-
1841
- >>> import paddle.fluid as fluid
1842
- >>> sentence = fluid.layers.data(
1843
- >>> name='sentence', dtype='float32', shape=[32])
1844
- >>> boot_memory = fluid.layers.data(
1845
- >>> name='boot', dtype='float32', shape=[10] )
1846
- >>>
1847
- >>> drnn = fluid.layers.DynamicRNN()
1848
- >>> with drnn.block():
1849
- >>> word = drnn.step_input(sentence)
1850
- >>> memory = drnn.memory(init=boot_memory, need_reorder=True)
1851
- >>> hidden = fluid.layers.fc(
1852
- >>> input=[word, memory], size=10, act='tanh' )
1853
- >>> drnn.update_memory(ex_mem=memory, new_mem= hidden)
1854
- >>> drnn.output(hidden)
1855
- >>> rnn_output = drnn()
1867
+ Examples:
1868
+ .. code-block:: python
1869
+
1870
+ import paddle.fluid as fluid
1871
+
1872
+ sentence = fluid.layers.data(name='sentence', shape=[32], dtype='float32', lod_level=1)
1873
+ boot_memory = fluid.layers.data(name='boot', shape=[10], dtype='float32', lod_level=1)
1874
+
1875
+ drnn = fluid.layers.DynamicRNN()
1876
+ with drnn.block():
1877
+ word = drnn.step_input(sentence)
1878
+ memory = drnn.memory(init=boot_memory, need_reorder=True)
1879
+ hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
1880
+ drnn.update_memory(ex_mem=memory, new_mem=hidden)
1881
+ drnn.output(hidden)
1882
+
1883
+ rnn_output = drnn()
1856
1884
1857
1885
1858
1886
Otherwise, if :code:`shape`, :code:`value`, :code:`dtype` are set, the
1859
1887
:code:`memory` will be initialized by this :code:`value`.
1860
1888
1861
- For example,
1889
+ Examples:
1890
+ .. code-block:: python
1862
1891
1863
- >>> import paddle.fluid as fluid
1864
- >>> sentence = fluid.layers.data(
1865
- >>> name='sentence', dtype='float32', shape=[32])
1866
- >>>
1867
- >>> drnn = fluid.layers.DynamicRNN()
1868
- >>> with drnn.block():
1869
- >>> word = drnn.step_input(sentence)
1870
- >>> memory = drnn.memory(shape=[10], dtype='float32', value=0)
1871
- >>> hidden = fluid.layers.fc(
1872
- >>> input=[word, memory], size=10, act='tanh')
1873
- >>> drnn.update_memory(ex_mem=memory, new_mem=hidden)
1874
- >>> drnn.output(hidden)
1875
- >>> rnn_output = drnn()
1892
+ import paddle.fluid as fluid
1876
1893
1894
+ sentence = fluid.layers.data(name='sentence', dtype='float32', shape=[32], lod_level=1)
1895
+
1896
+ drnn = fluid.layers.DynamicRNN()
1897
+ with drnn.block():
1898
+ word = drnn.step_input(sentence)
1899
+ memory = drnn.memory(shape=[10], dtype='float32', value=0)
1900
+ hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
1901
+ drnn.update_memory(ex_mem=memory, new_mem=hidden)
1902
+ drnn.output(hidden)
1877
1903
1878
- Args:
1879
- init(Variable|None): The initialized variable.
1904
+ rnn_output = drnn()
1880
1905
1881
- shape(list|tuple): The memory shape. NOTE the shape does not contain batch_size.
1882
1906
1907
+ Args:
1908
+ init(Variable|None): The initialized variable.
1909
+ shape(list|tuple): The memory shape. The shape does not contain batch_size.
1883
1910
value(float): the initialized value.
1884
-
1885
1911
need_reorder(bool): True if the initialized memory depends on the input sample.
1886
-
1887
1912
dtype(str|numpy.dtype): The data type of the initialized memory.
1888
1913
1889
1914
Returns:
0 commit comments