Commit 4dcf2dc

Fix (#72341)
1 parent 33912da commit 4dcf2dc

File tree

8 files changed, +25 -23 lines changed

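Note (context, not part of the diff): the commit swaps the soft-deprecated logging.warn() alias for logging.warning() in eight files. logging.warn has been documented as deprecated since Python 3.3 and emits a DeprecationWarning when warning filters allow it. A minimal sketch of the difference, assuming a default root-logger setup:

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    warnings.simplefilter("default")  # make the DeprecationWarning from the alias visible

    logging.warn("old spelling: logs the message and emits a DeprecationWarning")
    logging.warning("preferred spelling used throughout this commit")

Both calls produce the same WARNING log record; only the deprecated alias additionally triggers the DeprecationWarning.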

python/paddle/base/reader.py

+2 -2

@@ -1638,7 +1638,7 @@ def __init__(self, dataset, places, drop_last):
         ), f"Filelist number of dataset {len(dataset.filelist)} must be not less than place number {thread_num}"
 
         if dataset.thread_num != 0 and dataset.thread_num != thread_num:
-            logging.warn(
+            logging.warning(
                 f'thread_num {dataset.thread_num} which is set in Dataset is ignored'
             )
 
@@ -1650,7 +1650,7 @@ def __init__(self, dataset, places, drop_last):
             )
             and dataset.queue_num > thread_num
         ):
-            logging.warn(
+            logging.warning(
                 f"queue_num {dataset.queue_num} which is set in Dataset is ignored"
             )
             dataset._set_queue_num(thread_num)

python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py

+2 -2

@@ -543,10 +543,10 @@ def _can_apply(self):
 
         if self.user_defined_strategy.dgc:
             if not isinstance(self.inner_opt, Momentum):
-                logging.warn("dgc only works on Momentum optimizer")
+                logging.warning("dgc only works on Momentum optimizer")
                 return False
             if self.role_maker._worker_num() <= 1:
-                logging.warn("dgc only works on multi cards")
+                logging.warning("dgc only works on multi cards")
                 return False
 
         return True

python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py

+1 -1

@@ -73,7 +73,7 @@ def _can_apply(self):
 
         if self.user_defined_strategy.lamb:
             if not isinstance(self.inner_opt, Adam):
-                logging.warn(
+                logging.warning(
                     f"lamb need the inner optimizer to be AdamOptimizer optimizer but got {self.inner_opt.type}."
                 )
                 return False

python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py

+1 -1

@@ -62,7 +62,7 @@ def _can_apply(self):
 
         if self.user_defined_strategy.lars:
             if not isinstance(self.inner_opt, Momentum):
-                logging.warn(
+                logging.warning(
                     f"lars need the inner optimizer to be Momentum optimizer but got {self.inner_opt.type}."
                 )
                 return False

python/paddle/distributed/fleet/runtime/collective_runtime.py

+5 -5

@@ -24,27 +24,27 @@ def __init__(self):
         super().__init__()
 
     def _init_worker(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'init_worker' method for collective mode."
         )
 
     def _run_worker(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'run_worker' method for collective mode."
         )
 
     def _init_server(self, *args, **kwargs):
-        logging.warn(
+        logging.warning(
             "You should not call 'init_server' method for collective mode."
         )
 
     def _run_server(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'run_server' method for collective mode."
         )
 
     def _stop_worker(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'stop_worker' method for collective mode."
         )

python/paddle/distributed/transpiler/distribute_transpiler.py

+2 -2

@@ -1529,7 +1529,7 @@ def __clone_lr_op_sub_block__(op, program, lr_block):
             )
 
         if len(optimize_blocks) == 0:
-            logging.warn(
+            logging.warning(
                 "pserver [" + str(endpoint) + "] has no optimize block!!"
             )
             pre_block_idx = pserver_program.num_blocks - 1
@@ -2216,7 +2216,7 @@ def _create_table_optimize_block(
         }
         outputs = {"ParamOut": [param_var]}
         # only support sgd now
-        logging.warn(
+        logging.warning(
             "distribute lookup table only support sgd optimizer, change it's optimizer to sgd instead of "
             + table_opt_op.type
         )

python/paddle/distributed/transpiler/memory_optimization_transpiler.py

+2 -2

@@ -24,7 +24,7 @@ def memory_optimize(
     This API is deprecated since 1.6. Please do not use it. The better
     memory optimization strategies are enabled by default.
     """
-    logging.warn(
+    logging.warning(
         'Caution! paddle.distributed.transpiler.memory_optimize() is deprecated '
         'and not maintained any more, since it is not stable!\n'
         'This API would not take any memory optimizations on your Program '
@@ -47,7 +47,7 @@ def release_memory(input_program, skip_opt_set=None):
     This API is deprecated since 1.6. Please do not use it. The better
     memory optimization strategies are enabled by default.
     """
-    logging.warn(
+    logging.warning(
         'paddle.distributed.transpiler.release_memory() is deprecated, it would not'
         ' take any memory release on your program'
     )

python/paddle/incubate/distributed/fleet/collective.py

+10 -8

@@ -46,27 +46,27 @@ def __init__(self):
         self._param_file_name = "_paddle_fleet_param__"
 
     def init_worker(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'init_worker' method for collective mode."
         )
 
     def run_worker(self, main_programs=None, scopes=None):
-        logging.warn(
+        logging.warning(
             "You should not call 'run_worker' method for collective mode."
        )
 
     def init_server(self, model_dir=None):
-        logging.warn(
+        logging.warning(
             "You should not call 'init_server' method for collective mode."
         )
 
     def run_server(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'run_server' method for collective mode."
         )
 
     def stop_worker(self):
-        logging.warn(
+        logging.warning(
             "You should not call 'stop_worker' method for collective mode."
         )
 
@@ -410,11 +410,13 @@ def _try_to_compile(self, startup_program, main_program):
 
         if node_num <= 1:
             if self._strategy.nccl_comm_num > 1:
-                logging.warn("set nccl_comm_num=1 since you only have 1 node.")
+                logging.warning(
+                    "set nccl_comm_num=1 since you only have 1 node."
+                )
                 self._strategy.nccl_comm_num = 1
 
             if self._strategy.use_hierarchical_allreduce:
-                logging.warn(
+                logging.warning(
                     "set use_hierarchical_allreduce=False since you only have 1 node."
                 )
                 self._strategy.use_hierarchical_allreduce = False
@@ -426,7 +428,7 @@ def _try_to_compile(self, startup_program, main_program):
         if sync_batch_norm is not None and sync_batch_norm is True:
             self._strategy.nccl_comm_num = 1
             self._strategy.use_hierarchical_allreduce = False
-            logging.warn(
+            logging.warning(
                 "use sync_batch_norm will hang when set num_threads > 1, so "
                 "set num_threads=1, nccl_comm_num=1, use_hierarchical_allreduce=False."
             )
