@@ -63,9 +63,10 @@ struct CastToPyBufferImpl<true, I, ARGS...> {
63
63
auto *dst_ptr = static_cast <void *>(dst_tensor.mutable_data <CUR_TYPE>(
64
64
tensor.dims (), platform::CPUPlace ()));
65
65
66
- platform::DeviceContextPool &pool = platform::DeviceContextPool::Get ();
66
+ platform::DeviceContextPool &pool =
67
+ platform::DeviceContextPool::Instance ();
67
68
auto dev_ctx = static_cast <const platform::CUDADeviceContext *>(
68
- pool.Borrow (tensor.place ()));
69
+ pool.Get (tensor.place ()));
69
70
70
71
paddle::platform::GpuMemcpyAsync (
71
72
dst_ptr, src_ptr, sizeof (CUR_TYPE) * tensor.numel (),
@@ -137,9 +138,9 @@ void PyCUDATensorSetFromArray(
137
138
self.Resize (framework::make_ddim (dims));
138
139
auto *dst = self.mutable_data <T>(place);
139
140
140
- platform::DeviceContextPool &pool = platform::DeviceContextPool::Get ();
141
+ platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance ();
141
142
auto dev_ctx =
142
- static_cast <const platform::CUDADeviceContext *>(pool.Borrow (place));
143
+ static_cast <const platform::CUDADeviceContext *>(pool.Get (place));
143
144
paddle::platform::GpuMemcpyAsync (dst, array.data (), sizeof (T) * array.size (),
144
145
cudaMemcpyHostToDevice, dev_ctx->stream ());
145
146
}
0 commit comments