Skip to content

[Bug-fix] Replace Tensor.item() with np.array(Tensor).item() due to the low performance of Tensor.item() #52483

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 5, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_method.cc
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ static PyObject* tensor_method_numpy(TensorObject* self,
"correct and will be "
"removed in future. For Tensor contain only one element, Please "
"modify "
" 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
" 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
"possible, "
"otherwise 'Tensor.numpy()[0]' will raise error in future.";
py_rank = 1;
Expand Down
8 changes: 4 additions & 4 deletions python/paddle/fluid/dygraph/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,21 +139,21 @@ def _float_(var):
), "only one element variable can be converted to float."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return float(var.item())
return float(np.array(var).flatten()[0])

def _long_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to long."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])

def _int_(var):
numel = np.prod(var.shape)
assert numel == 1, "only one element variable can be converted to int."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])

def _len_(var):
assert var.ndim > 0, "len() of a 0D tensor is wrong"
Expand All @@ -171,7 +171,7 @@ def _index_(var):
), "only one element variable can be converted to python index."
tensor = var.value().get_tensor()
assert tensor._is_initialized(), "variable's tensor is not initialized"
return int(var.item())
return int(np.array(var).flatten()[0])

@property
def _ndim_(var):
Expand Down
4 changes: 2 additions & 2 deletions python/paddle/fluid/dygraph/varbase_patch_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -718,11 +718,11 @@ def __nonzero__(self):
), "When Variable is used as the condition of if/while , Variable can only contain one element."
if framework.global_var._in_eager_mode_:
assert self._is_initialized(), "tensor not initialized"
return bool(self.item() > 0)
return bool(np.array(self) > 0)
else:
tensor = self.value().get_tensor()
assert tensor._is_initialized(), "tensor not initialized"
return bool(self.item() > 0)
return bool(np.array(tensor) > 0)

def __bool__(self):
return self.__nonzero__()
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/nn/decode.py
Original file line number Diff line number Diff line change
Expand Up @@ -712,7 +712,7 @@ def _maybe_copy(state, new_state, step_mask):

step_idx = 0
step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")
while cond.item():
while np.array(cond).item():
(step_outputs, next_states, next_inputs, next_finished) = decoder.step(
step_idx_tensor, inputs, states, **kwargs
)
Expand Down
6 changes: 4 additions & 2 deletions python/paddle/nn/functional/pooling.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import Variable, in_dygraph_mode

Expand Down Expand Up @@ -706,7 +708,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
else:
for i, var in enumerate(output_size):
if isinstance(var, Variable):
output_size[i] = var.item()
output_size[i] = np.array(var).item()

if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
Expand Down Expand Up @@ -1609,7 +1611,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):

if in_dygraph_mode():
output_size = [
item.item(0) if isinstance(item, Variable) else item
np.array(item).item(0) if isinstance(item, Variable) else item
for item in output_size
]
# output_size support Variable in static graph mode
Expand Down
4 changes: 3 additions & 1 deletion python/paddle/static/nn/metric.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
"""
All layers just related to metric.
"""
import numpy as np

import paddle
from paddle import _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
Expand Down Expand Up @@ -76,7 +78,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
if total is None:
total = _varbase_creator(dtype="int32")

_k = k.item(0) if isinstance(k, Variable) else k
_k = np.array(k).item(0) if isinstance(k, Variable) else k
topk_out, topk_indices = _legacy_C_ops.top_k_v2(
input, 'k', _k, 'sorted', False
)
Expand Down