
Commit c67e05f

replace F.droput with nn.Dropout (#520)
* fix a shape typo
* replace F.droput with nn.Dropout
* add ann 80k configs
* add ann 80k configs
1 parent c12afff commit c67e05f

9 files changed (+51 -51 lines)
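Note on the change (not part of the commit itself): with the functional API, paddle.nn.functional.dropout defaults to training=True, so it keeps dropping activations even after model.eval() unless the flag is threaded through by hand, whereas an nn.Dropout registered as a sublayer is toggled automatically by train()/eval(). Below is a minimal sketch of the difference, assuming the Paddle 2.x dygraph API; the class names are illustrative only and do not exist in PaddleSeg.

# Illustrative only -- contrasts the two dropout styles touched by this commit.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class FunctionalHead(nn.Layer):
    """Dropout via F.dropout: nothing is registered on the layer, and the
    functional call defaults to training=True, so eval() alone does not
    disable it."""

    def __init__(self, dropout_prob=0.1):
        super().__init__()
        self.dropout_prob = dropout_prob

    def forward(self, x):
        return F.dropout(x, p=self.dropout_prob)


class ModuleHead(nn.Layer):
    """Dropout as a registered sublayer: train()/eval() toggles it."""

    def __init__(self, dropout_prob=0.1):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout_prob)

    def forward(self, x):
        return self.dropout(x)


if __name__ == '__main__':
    x = paddle.ones([1, 8])
    head = ModuleHead()
    head.eval()  # disables the registered nn.Dropout
    print(paddle.allclose(head(x), x))  # True tensor: identity in eval mode

The same pattern appears in every file below: the dropout probability is captured once in __init__ as an nn.Dropout sublayer, and the forward pass only calls that sublayer.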
@@ -1,6 +1,3 @@
-_base_: 'ann_resnet50_os8_cityscapes_1024x512_160k.yml'
+_base_: 'ann_resnet101_os8_cityscapes_1024x512_80k.yml'
 
-model:
-  backbone:
-    type: ResNet101_vd
-    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
+iters: 160000
@@ -0,0 +1,6 @@
+_base_: 'ann_resnet50_os8_cityscapes_1024x512_80k.yml'
+
+model:
+  backbone:
+    type: ResNet101_vd
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
@@ -1,26 +1,3 @@
-_base_: '../_base_/cityscapes.yml'
+_base_: 'ann_resnet50_os8_cityscapes_1024x512_80k.yml'
 
-batch_size: 2
 iters: 160000
-
-learning_rate:
-  decay:
-    end_lr: 1.0e-5
-
-loss:
-  types:
-    - type: CrossEntropyLoss
-  coef: [1, 0.4]
-
-model:
-  type: ANN
-  backbone:
-    type: ResNet50_vd
-    output_stride: 8
-    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
-  backbone_indices: [2, 3]
-  key_value_channels: 256
-  inter_channels: 512
-  psp_size: [1, 3, 6, 8]
-  enable_auxiliary_loss: True
-  pretrained: null
@@ -0,0 +1,26 @@
+_base_: '../_base_/cityscapes.yml'
+
+batch_size: 2
+iters: 80000
+
+learning_rate:
+  decay:
+    end_lr: 1.0e-5
+
+loss:
+  types:
+    - type: CrossEntropyLoss
+  coef: [1, 0.4]
+
+model:
+  type: ANN
+  backbone:
+    type: ResNet50_vd
+    output_stride: 8
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
+  backbone_indices: [2, 3]
+  key_value_channels: 256
+  inter_channels: 512
+  psp_size: [1, 3, 6, 8]
+  enable_auxiliary_loss: True
+  pretrained: null

dygraph/paddleseg/models/ann.py (+4 -4)
@@ -203,7 +203,7 @@ def __init__(self,
             in_channels=out_channels + high_in_channels,
             out_channels=out_channels,
             kernel_size=1)
-        self.dropout_prob = dropout_prob
+        self.dropout = nn.Dropout(p=dropout_prob)
 
     def forward(self, low_feats, high_feats):
         priors = [stage(low_feats, high_feats) for stage in self.stages]
@@ -212,7 +212,7 @@ def forward(self, low_feats, high_feats):
             context += priors[i]
 
         output = self.conv_bn(paddle.concat([context, high_feats], axis=1))
-        output = F.dropout(output, p=self.dropout_prob) # dropout_prob
+        output = self.dropout(output)
 
         return output
 
@@ -251,7 +251,7 @@ def __init__(self,
             in_channels=in_channels * 2,
             out_channels=out_channels,
             kernel_size=1)
-        self.dropout_prob = dropout_prob
+        self.dropout = nn.Dropout(p=dropout_prob)
 
     def forward(self, x):
         priors = [stage(x) for stage in self.stages]
@@ -260,7 +260,7 @@ def forward(self, x):
             context += priors[i]
 
         output = self.conv_bn(paddle.concat([context, x], axis=1))
-        output = F.dropout(output, p=self.dropout_prob) # dropout_prob
+        output = self.dropout(output)
 
         return output

dygraph/paddleseg/models/fast_scnn.py (+3 -13)
@@ -26,14 +26,11 @@
 class FastSCNN(nn.Layer):
     """
     The FastSCNN implementation based on PaddlePaddle.
-
     As mentioned in the original paper, FastSCNN is a real-time segmentation algorithm (123.5fps)
     even for high resolution images (1024x2048).
-
     The original article refers to
     Poudel, Rudra PK, et al. "Fast-scnn: Fast semantic segmentation network"
     (https://arxiv.org/pdf/1902.04502.pdf).
-
     Args:
         num_classes (int): The unique number of target classes.
         enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss.
@@ -95,9 +92,7 @@ def init_weight(self):
 class LearningToDownsample(nn.Layer):
     """
     Learning to downsample module.
-
     This module consists of three downsampling blocks (one conv and two separable conv)
-
     Args:
         dw_channels1 (int, optional): The input channels of the first sep conv. Default: 32.
         dw_channels2 (int, optional): The input channels of the second sep conv. Default: 48.
@@ -132,10 +127,8 @@ def forward(self, x):
 class GlobalFeatureExtractor(nn.Layer):
     """
     Global feature extractor module.
-
     This module consists of three InvertedBottleneck blocks (like inverted residual introduced by MobileNetV2) and
     a PPModule (introduced by PSPNet).
-
     Args:
         in_channels (int, optional): The number of input channels to the module. Default: 64.
         block_channels (tuple, optional): A tuple represents output channels of each bottleneck block. Default: (64, 96, 128).
@@ -189,7 +182,6 @@ def forward(self, x):
 class InvertedBottleneck(nn.Layer):
     """
     Single Inverted bottleneck implementation.
-
     Args:
         in_channels (int): The number of input channels to bottleneck block.
         out_channels (int): The number of output channels of bottleneck block.
@@ -236,9 +228,7 @@ def forward(self, x):
 class FeatureFusionModule(nn.Layer):
     """
     Feature Fusion Module Implementation.
-
     This module fuses high-resolution feature and low-resolution feature.
-
     Args:
         high_in_channels (int): The channels of high-resolution feature (output of LearningToDownsample).
         low_in_channels (int): The channels of low-resolution feature (output of GlobalFeatureExtractor).
@@ -278,9 +268,7 @@ def forward(self, high_res_input, low_res_input):
 class Classifier(nn.Layer):
     """
     The Classifier module implementation.
-
     This module consists of two depth-wise conv and one conv.
-
     Args:
         input_channels (int): The input channels to this module.
         num_classes (int): The unique number of target classes.
@@ -304,9 +292,11 @@ def __init__(self, input_channels, num_classes):
         self.conv = nn.Conv2D(
             in_channels=input_channels, out_channels=num_classes, kernel_size=1)
 
+        self.dropout = nn.Dropout(p=0.1) # dropout_prob
+
     def forward(self, x):
         x = self.dsconv1(x)
         x = self.dsconv2(x)
-        x = F.dropout(x, p=0.1) # dropout_prob
+        x = self.dropout(x)
         x = self.conv(x)
         return x

dygraph/paddleseg/models/gcnet.py (+3 -1)
@@ -124,6 +124,8 @@ def __init__(self,
             kernel_size=3,
             padding=1)
 
+        self.dropout = nn.Dropout(p=0.1)
+
         self.conv = nn.Conv2D(
             in_channels=gc_channels, out_channels=num_classes, kernel_size=1)
 
@@ -147,7 +149,7 @@ def forward(self, feat_list):
         output = paddle.concat([x, output], axis=1)
         output = self.conv_bn_relu3(output)
 
-        output = F.dropout(output, p=0.1) # dropout_prob
+        output = self.dropout(output)
         logit = self.conv(output)
         logit_list.append(logit)

dygraph/paddleseg/models/layers/layer_libs.py (+3 -3)
@@ -151,15 +151,15 @@ def __init__(self,
             kernel_size=3,
             padding=1)
 
+        self.dropout = nn.Dropout(p=dropout_prob)
+
         self.conv = nn.Conv2D(
             in_channels=inter_channels,
             out_channels=out_channels,
             kernel_size=1)
 
-        self.dropout_prob = dropout_prob
-
     def forward(self, x):
         x = self.conv_bn_relu(x)
-        x = F.dropout(x, p=self.dropout_prob)
+        x = self.dropout(x)
         x = self.conv(x)
         return x

dygraph/paddleseg/models/pspnet.py (+3 -1)
@@ -113,6 +113,8 @@ def __init__(self,
             out_channels=pp_out_channels,
             bin_sizes=bin_sizes)
 
+        self.dropout = nn.Dropout(p=0.1) # dropout_prob
+
         self.conv = nn.Conv2D(
             in_channels=pp_out_channels,
             out_channels=num_classes,
@@ -130,7 +132,7 @@ def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[1]]
         x = self.psp_module(x)
-        x = F.dropout(x, p=0.1) # dropout_prob
+        x = self.dropout(x)
         logit = self.conv(x)
         logit_list.append(logit)
