diff --git a/python/paddle/distributed/auto_parallel/static/helper.py b/python/paddle/distributed/auto_parallel/static/helper.py
index 4b33190e964313..f0ab702a193f6f 100644
--- a/python/paddle/distributed/auto_parallel/static/helper.py
+++ b/python/paddle/distributed/auto_parallel/static/helper.py
@@ -456,9 +456,7 @@ def init_pir(self, main_program, place):
             barrier_tensor = paddle.full([1], 1, dtype="int32")
             # barrier is not available in xpu for now
             if not paddle.framework.core.is_compiled_with_xpu():
-                paddle._legacy_C_ops.barrier(
-                    barrier_tensor, barrier_tensor, 'ring_id', 0
-                )
+                paddle._C_ops.barrier(barrier_tensor, 0)
             paddle.enable_static()
 
     def init(self, main_program, place, dist_context):
@@ -600,9 +598,7 @@ def init(self, main_program, place, dist_context):
             barrier_tensor = paddle.full([1], 1, dtype="int32")
             # barrier is not available in xpu for now
             if not paddle.framework.core.is_compiled_with_xpu():
-                paddle._legacy_C_ops.barrier(
-                    barrier_tensor, barrier_tensor, 'ring_id', 0
-                )
+                paddle._C_ops.barrier(barrier_tensor, 0)
             paddle.enable_static()
 
     def cache_whole_graph_dist_attr(self, all_params):
diff --git a/python/paddle/distributed/communication/group.py b/python/paddle/distributed/communication/group.py
index 1497643b758766..cda2658d03f3b6 100644
--- a/python/paddle/distributed/communication/group.py
+++ b/python/paddle/distributed/communication/group.py
@@ -347,9 +347,7 @@ def barrier(group: Group | None = None) -> None:
     if framework.in_dynamic_mode():
         # barrier is not available in xpu for now
         if not paddle.framework.core.is_compiled_with_xpu():
-            return paddle._legacy_C_ops.barrier(
-                barrier_tensor, barrier_tensor, 'ring_id', ring_id
-            )
+            return paddle._C_ops.barrier(barrier_tensor, ring_id)
     else:
         op_type = 'barrier'
         if not isinstance(ring_id, int):