|
12 | 12 | # See the License for the specific language governing permissions and
|
13 | 13 | # limitations under the License.
|
14 | 14 |
|
15 |
| -#Todo(luotao02) This config is only used for unitest. It is out of date now, and will be updated later. |
| 15 | +from paddle.trainer_config_helpers import * |
16 | 16 |
|
17 | 17 | ################################### Data Configuration ###################################
|
18 |
# Data configuration: this unit-test config reads pre-generated MNIST
# samples from the ProtoData file list shipped with the trainer tests.
TrainData(ProtoData(files="trainer/tests/mnist.list"))
19 | 19 | ################################### Algorithm Configuration ###################################
|
# Algorithm configuration: plain SGD, mini-batches of 1000, with a
# (non-sparse) momentum of 0.5 as the learning method.
settings(
    batch_size=1000,
    learning_method=MomentumOptimizer(momentum=0.5, sparse=False),
)
31 | 22 | ################################### Network Configuration ###################################
# Network configuration: a 784-800-800-10 MLP classifier for the MNIST
# unit test, trained with multi-class cross-entropy.
#
# NOTE(review): the pre-migration config fed only the second hidden layer
# into the output layer; here BOTH hidden layers feed the output
# (input=[hidden1, hidden2]) — confirm this topology change is intentional.

img = data_layer(name="input", size=784)

hidden1 = fc_layer(
    input=img,
    size=800,
    bias_attr=True,
    act=SigmoidActivation(),
)

hidden2 = fc_layer(
    input=hidden1,
    size=800,
    bias_attr=True,
    act=SigmoidActivation(),
)

prediction = fc_layer(
    input=[hidden1, hidden2],
    size=10,
    bias_attr=True,
    act=SoftmaxActivation(),
)

label = data_layer(name="label", size=1)

# classification_cost wraps softmax output + integer label into a
# multi-class cross-entropy cost; it is the single network output.
loss = classification_cost(input=prediction, label=label)
outputs(loss)
0 commit comments