@@ -45,11 +45,11 @@ def _supported_check():
     return True
 
 
-class LRUSharedCache(OrderedDict):
+class _LRUSharedCache(OrderedDict):
     def __init__(self):
         self.limit = 128
         self._after_fork()
-        register_after_fork(self, LRUSharedCache._after_fork)
+        register_after_fork(self, _LRUSharedCache._after_fork)
 
     def _after_fork(self):
         self.lock = threading.Lock()
@@ -73,25 +73,25 @@ def __setitem__(self, key, value):
             super().__setitem__(key, value)
 
 
-shared_cache = LRUSharedCache()
+shared_cache = _LRUSharedCache()
 
 
-def cuda_from_cache(key):
+def _cuda_from_cache(key):
     lodtensor = shared_cache.get(key)
     if lodtensor is None:
         return None
     return lodtensor
 
 
-def rebuild_tensor(cls, lodtensor, metadata):
-    if cls == paddle.fluid.framework.ParamBase:
-        tensor = paddle.fluid.framework.ParamBase(
+def _rebuild_tensor(cls, lodtensor, metadata):
+    if cls == paddle.fluid.framework.EagerParamBase:
+        tensor = paddle.fluid.framework.EagerParamBase(
             lodtensor.shape(), lodtensor._dtype(), **metadata
         )
         tensor.value().get_tensor()._share_data_with(lodtensor)
     else:
         size, stop_gradient = metadata
-        tensor = paddle.fluid.core.VarBase()
+        tensor = paddle.fluid.core.eager.Tensor()
         if lodtensor._is_initialized():
             tensor.value().get_tensor()._share_data_with(lodtensor)
         else:
@@ -100,7 +100,7 @@ def rebuild_tensor(cls, lodtensor, metadata):
     return tensor
 
 
-def reduce_tensor(tensor):
+def _reduce_tensor(tensor):
     lodtensor = tensor.value().get_tensor()
 
     if not tensor.stop_gradient and not tensor.is_leaf:
@@ -113,29 +113,29 @@ def reduce_tensor(tensor):
         or tensor.place.is_gpu_place()
         or tensor.place.is_cuda_pinned_place()
     ):
-        if type(tensor) == paddle.fluid.framework.ParamBase:
+        if type(tensor) == paddle.fluid.framework.EagerParamBase:
             metadata = copy.deepcopy(tensor.__dict__)
         else:
             metadata = (tensor.size, tensor.stop_gradient)
 
-        return (rebuild_tensor, (type(tensor), lodtensor, metadata))
+        return (_rebuild_tensor, (type(tensor), lodtensor, metadata))
     else:
         raise ValueError(
             "Only support tensors of CPU/CUDA/CUDAPinned Place, Not support %s for now!"
             % tensor.place
         )
 
 
-def rebuild_lodtensor_filename(cls, ipc_name, size, type_idx, dims, lod):
+def _rebuild_lodtensor_filename(cls, ipc_name, size, type_idx, dims, lod):
     lodtensor = cls._new_shared_filename((ipc_name, size, type_idx, dims, lod))
     lodtensor._shared_decref()
     return lodtensor
 
 
-def rebuild_cuda_tensor(
+def _rebuild_cuda_tensor(
     cls, handle, offset_bytes, size, type_idx, dims, lod, device_idx
 ):
-    cache_tensor = cuda_from_cache((handle, offset_bytes))
+    cache_tensor = _cuda_from_cache((handle, offset_bytes))
     if cache_tensor is None:
         lodtensor = cls._new_shared_cuda(
             (handle, offset_bytes, size, type_idx, dims, lod, device_idx)
@@ -155,33 +155,33 @@ def rebuild_cuda_tensor(
     return lodtensor
 
 
-def rebuild_lodtensor_empty(cls):
+def _rebuild_lodtensor_empty(cls):
     # TODO: check if tensor initialized
     # TODO: handle the dtype of empty tensor
     return cls()
 
 
-def reduce_lodtensor(lodtensor):
+def _reduce_lodtensor(lodtensor):
     if (
         lodtensor._place().is_cpu_place()
         or lodtensor._place().is_cuda_pinned_place()
     ):
         for dim in lodtensor.shape():
             if dim == 0:
                 # Empty tensors have nothing be mmapped.
-                return (rebuild_lodtensor_empty, (type(lodtensor),))
+                return (_rebuild_lodtensor_empty, (type(lodtensor),))
 
         # Default use share filename stratege
         metadata = (
             lodtensor._share_filename()
         )  # ipc_name, size, type_idx, dims, lod
-        rebuild = rebuild_lodtensor_filename
+        rebuild = _rebuild_lodtensor_filename
         lodtensor._shared_incref()
         # TODO, maintain reference for lodtensor
         # TODO: support file_discriptor stratege
     elif lodtensor._place().is_gpu_place():
         metadata = lodtensor._share_cuda()
-        rebuild = rebuild_cuda_tensor
+        rebuild = _rebuild_cuda_tensor
     else:
         raise RuntimeError("We only support pass cpu/gpu lodtensor for now!")
 
@@ -192,7 +192,9 @@ def init_reductions():
     if not _supported_check():
         return
 
-    ForkingPickler.register(paddle.Tensor, reduce_tensor)
-    ForkingPickler.register(paddle.fluid.core.VarBase, reduce_tensor)
-    ForkingPickler.register(paddle.fluid.framework.ParamBase, reduce_tensor)
-    ForkingPickler.register(paddle.fluid.core.LoDTensor, reduce_lodtensor)
+    ForkingPickler.register(paddle.Tensor, _reduce_tensor)
+    ForkingPickler.register(paddle.fluid.core.eager.Tensor, _reduce_tensor)
+    ForkingPickler.register(
+        paddle.fluid.framework.EagerParamBase, _reduce_tensor
+    )
+    ForkingPickler.register(paddle.fluid.core.LoDTensor, _reduce_lodtensor)
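
For context, a minimal usage sketch (not part of the diff): once init_reductions() has registered these reducers on ForkingPickler, a tensor put on a multiprocessing queue is serialized through shared memory by _reduce_tensor and rebuilt in the child by the _rebuild_* helpers instead of being deep-copied. The paddle.incubate.multiprocessing import path and its re-export of Process/Queue are assumptions here, not something this diff shows.

# Hypothetical sketch -- assumes paddle.incubate.multiprocessing wraps the
# standard multiprocessing module and calls init_reductions() on import.
import paddle
import paddle.incubate.multiprocessing as mp


def worker(queue):
    # The child receives a tensor rebuilt from the shared-memory handle
    # produced by the reducers registered on ForkingPickler.
    t = queue.get()
    print(t.numpy())


if __name__ == "__main__":
    queue = mp.Queue()
    proc = mp.Process(target=worker, args=(queue,))
    proc.start()
    # Pickling here goes through _reduce_tensor / _reduce_lodtensor,
    # so the payload is an IPC handle rather than a copy of the data.
    queue.put(paddle.to_tensor([1.0, 2.0, 3.0]))
    proc.join()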