     'fc',
     'embedding',
     'conv2d',
-    'dropout',
     'split',
     'l2_normalize',
     'row_conv',
@@ -750,139 +749,6 @@ def _pull_box_sparse(
     return outs


-@deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout")
-def dropout(
-    x,
-    dropout_prob,
-    is_test=None,
-    seed=None,
-    name=None,
-    dropout_implementation="downgrade_in_infer",
-):
-    """
-
-    Computes dropout.
-
-    Drop or keep each element of `x` independently. Dropout is a regularization
-    technique for reducing overfitting by preventing neuron co-adaptation during
-    training. The dropout operator randomly sets (according to the given dropout
-    probability) the outputs of some units to zero, while others remain
-    unchanged.
-
-    The dropout op can be removed from the program to make the program more efficient.
-
-    Args:
-        x (Variable): The input tensor variable. The data type is float16, float32 or float64.
-        dropout_prob (float): Probability of setting units to zero.
-        is_test (bool): A flag indicating whether it is in the test phase or not.
-            Default None. In dynamic graph mode, the global tracer mode is used;
-            in static graph mode, it means False.
-        seed (int): A Python integer used to create random seeds. If this
-            parameter is set to None, a random seed is used.
-            NOTE: If an integer seed is given, the same output units will
-            always be dropped. DO NOT use a fixed seed in training. Default: None.
-        name (str|None): A name for this layer (optional). If set to None, the layer
-            will be named automatically.
-        dropout_implementation (str): ['downgrade_in_infer' (default) | 'upscale_in_train']
-
-            1. downgrade_in_infer (default), downgrade the outcome at inference
-
-               - train: out = input * mask
-               - inference: out = input * (1.0 - dropout_prob)
-
-               (mask is a tensor with the same shape as input; its values are 0 or 1,
-               and the ratio of zeros is dropout_prob)
-            2. upscale_in_train, upscale the outcome at training time
-
-               - train: out = input * mask / (1.0 - dropout_prob)
-               - inference: out = input
-
-               (mask is a tensor with the same shape as input; its values are 0 or 1,
-               and the ratio of zeros is dropout_prob)
-
-
-    Returns:
-        A Variable holding a Tensor representing the dropout result, with the same shape and data type as `x`.
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-
-            paddle.enable_static()
-            x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
-            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
-    """
-    if not isinstance(dropout_prob, (float, int, Variable)):
-        raise TypeError(
-            "dropout_prob argument should be a number(int|float) or Variable"
-        )
-    # fast return for p == 0
-    if isinstance(dropout_prob, (int, float)) and dropout_prob == 0:
-        return x
-
-    if _non_static_mode():
-        if (
-            seed is None or seed == 0
-        ) and default_main_program().random_seed != 0:
-            seed = default_main_program().random_seed
-        if is_test is None:
-            is_test = not _dygraph_tracer()._train_mode
-        out, mask = _legacy_C_ops.dropout(
-            x,
-            'dropout_prob',
-            dropout_prob,
-            'is_test',
-            is_test,
-            'fix_seed',
-            seed is not None,
-            'seed',
-            seed if seed is not None else 0,
-            'dropout_implementation',
-            dropout_implementation,
-        )
-        return out
-
-    def get_attrs(prog, dropout_prob, is_test, seed):
-        if (seed is None or seed == 0) and prog.random_seed != 0:
-            seed = prog.random_seed
-        if isinstance(dropout_prob, Variable) and not dropout_prob.shape != [1]:
-            raise TypeError(
-                "Required dropout_prob.shape == [1] if type(dropout_prob) is Variable, but received dropout_prob.shape = {}".format(
-                    dropout_prob.shape
-                )
-            )
-        attrs = {
-            'dropout_prob': dropout_prob,
-            'is_test': is_test,
-            'fix_seed': seed is not None,
-            'seed': seed if seed is not None else 0,
-            'dropout_implementation': dropout_implementation,
-        }
-        return attrs
-
-    helper = LayerHelper('dropout', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'dropout'
-    )
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    mask = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True
-    )
-
-    attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)
-
-    helper.append_op(
-        type='dropout',
-        inputs={'X': [x]},
-        outputs={'Out': [out], 'Mask': [mask]},
-        attrs=attrs,
-    )
-    return out
-
-
 def conv2d(
     input,
     num_filters,
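
The `@deprecated` decorator on the removed layer points callers at `paddle.nn.functional.dropout`. Below is a minimal migration sketch, assuming Paddle 2.x in dynamic graph mode; the tensor shape and variable names are illustrative and not taken from this diff.

    # Migration sketch (assumption: Paddle 2.x, dynamic graph mode).
    # fluid.layers.dropout(x, dropout_prob=0.5) defaulted to the
    # "downgrade_in_infer" behaviour; paddle.nn.functional.dropout exposes
    # the same choice through its `mode` argument.
    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([4, 32, 32], dtype="float32")

    # Closest equivalent of the removed layer's default behaviour.
    out_legacy_style = F.dropout(x, p=0.5, mode="downgrade_in_infer")

    # The functional API's own default, which rescales at training time so
    # inference can return the input unchanged.
    out_new_default = F.dropout(x, p=0.5, mode="upscale_in_train")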
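
The removed docstring distinguishes the two `dropout_implementation` modes only by where the 1/(1.0 - dropout_prob) rescaling is applied. The following NumPy restatement of those formulas is purely illustrative; the helper name `dropout_reference` and the seeding are assumptions, not part of Paddle's actual kernel.

    # Illustrative NumPy restatement of the two dropout_implementation modes
    # quoted in the removed docstring; not the actual Paddle operator.
    import numpy as np

    def dropout_reference(x, dropout_prob, training, mode, seed=0):
        # A fixed seed reproduces the same mask on every call, which is
        # exactly why the docstring warns against fixed seeds in training.
        rng = np.random.default_rng(seed)
        mask = (rng.random(x.shape) >= dropout_prob).astype(x.dtype)
        if mode == "downgrade_in_infer":
            # train: out = input * mask ; inference: out = input * (1 - p)
            return x * mask if training else x * (1.0 - dropout_prob)
        if mode == "upscale_in_train":
            # train: out = input * mask / (1 - p) ; inference: out = input
            return x * mask / (1.0 - dropout_prob) if training else x
        raise ValueError("unknown dropout_implementation: " + mode)

    x = np.ones((2, 3), dtype="float32")
    print(dropout_reference(x, 0.5, training=True, mode="upscale_in_train"))
    print(dropout_reference(x, 0.5, training=False, mode="downgrade_in_infer"))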