add __str__ method for tensor and lodtensor to support print test=dev… #17588

Merged · 6 commits · May 24, 2019

28 changes: 2 additions & 26 deletions paddle/fluid/framework/lod_tensor.cc
@@ -53,32 +53,8 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
 }
 
 std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  if (!platform::is_cpu_place(t.place())) {
-    LoDTensor cpu_tensor;
-    cpu_tensor.set_lod(t.lod());
-    framework::TensorCopy(t, platform::CPUPlace(), &cpu_tensor);
-    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
-    auto &dev_ctx = *pool.Get(t.place());
-    dev_ctx.Wait();
-
-    os << cpu_tensor;
-    return os;
-  }
-
-  os << "dim: " << t.dims() << "\n";
-  os << "lod: " << t.lod() << "\n";
-
-  // only print first ten elements
-  int64_t size = t.numel() < 10 ? t.numel() : 10;
-  for (int64_t i = 0; i < size; ++i) {
-    if (t.type() == proto::VarType::FP32) {
-      os << t.data<float>()[i] << " ";
-    } else if (t.type() == proto::VarType::INT64) {
-      os << t.data<int64_t>()[i] << " ";
-    } else {
-      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
-    }
-  }
+  os << "\tlod: " << t.lod() << "\n";
+  os << static_cast<Tensor>(t) << "\n";
 
   return os;
 }
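With this change the `LoDTensor` printer only emits the LoD and then defers to the plain `Tensor` stream operator, so the device-to-CPU copy and the per-dtype formatting live in exactly one place (see `tensor_util.cc` below). A minimal sketch of the resulting behavior from Python, once the `__str__` bindings further down are in place; output values are illustrative, the exact text comes from the C++ stream operators:

```python
import numpy as np
import paddle.fluid as fluid

# Build a small LoDTensor on the CPU and print it through the new __str__ hook.
t = fluid.core.LoDTensor()
t.set(np.arange(4, dtype='float32').reshape(2, 2), fluid.CPUPlace())
t.set_recursive_sequence_lengths([[1, 1]])

print(t)
# Roughly:
#   lod: {{0, 1, 2}}
#   dim: 2, 2
#   layout: NCHW
#   dtype: float
#   data: [0 1 2 3]
```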
46 changes: 46 additions & 0 deletions paddle/fluid/framework/tensor_util.cc
@@ -491,5 +491,51 @@ void TensorFromStream(std::istream& is, Tensor* tensor,
   }
 }
 
+template <typename T>
+std::ostream& print_tensor(std::ostream& os, const framework::Tensor& tensor) {
+  auto inspect = tensor.data<T>();
+  auto element_num = tensor.numel();
+
+  os << "\tdata: [";
+  if (element_num > 0) {
+    os << inspect[0];
+    for (int j = 1; j < element_num; ++j) {
+      os << " " << inspect[j];
+    }
+  }
+  os << "]";
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const Tensor& t) {
+  os << "\tdim: " << t.dims() << "\n";
+  os << "\tlayout: " << DataLayoutToString(t.layout()) << "\n";
+
+  Tensor tensor;
+  tensor.Resize(t.dims());
+  if (platform::is_cpu_place(t.place())) {
+    tensor.ShareDataWith(t);
+  } else {
+    platform::CPUPlace place;
+    framework::TensorCopy(t, place, &tensor);
+    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+    auto& dev_ctx = *pool.Get(t.place());
+    dev_ctx.Wait();
+  }
+
+#define PrintTensorCallback(cpp_type, proto_type) \
+  do {                                            \
+    if (tensor.type() == proto_type) {            \
+      os << "\tdtype: " << proto_type << "\n";    \
+      print_tensor<cpp_type>(os, tensor);         \
+      return os;                                  \
+    }                                             \
+  } while (0)
+
+  _ForEachDataType_(PrintTensorCallback);
+  VLOG(1) << "PrintVar: unrecognized data type:" << t.type();
+  return os;
+}
+
 } // namespace framework
 } // namespace paddle
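`PrintTensorCallback` plus `_ForEachDataType_` is Paddle's usual X-macro dispatch: the callback body is stamped out once per registered `(cpp_type, proto_type)` pair, replacing the hand-written `FP32`/`INT64` chain the old `LoDTensor` printer had. A hedged sketch of the effect from the Python side, where any dtype registered with the framework should now print, not just float32/int64:

```python
import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()

# The old printer raised PADDLE_THROW for anything but float32/int64;
# the macro-generated dispatch covers every registered dtype.
for dtype in ('float32', 'float64', 'int32', 'int64'):
    t = fluid.core.LoDTensor()
    t.set(np.ones([2, 3], dtype=dtype), place)
    print(t)  # dim, layout, dtype, then the flattened data
```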
2 changes: 2 additions & 0 deletions paddle/fluid/framework/tensor_util.h
@@ -151,5 +151,7 @@ void TensorToVector(const Tensor& src, std::vector<T>* dst) {
   memory::Copy(dst_place, dst_ptr, boost::get<platform::CPUPlace>(src.place()),
                src_ptr, size);
 }
+
+std::ostream& operator<<(std::ostream& os, const Tensor& t);
 } // namespace framework
 } // namespace paddle
43 changes: 5 additions & 38 deletions paddle/fluid/platform/lodtensor_printer.cc
@@ -20,24 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace platform {
 
-template <typename T>
-void print_lod_tensor(const std::string& var_name,
-                      const framework::LoDTensor& lod_tensor,
-                      const std::string& print_info) {
-  auto inspect = lod_tensor.data<T>();
-  auto element_num = lod_tensor.numel();
-
-  std::ostringstream sstream;
-  sstream << print_info << "\t";
-  sstream << var_name << "\t";
-  sstream << inspect[0];
-  for (int j = 1; j < element_num; ++j) {
-    sstream << " " << inspect[j];
-  }
-
-  std::cout << sstream.str() << std::endl;
-}
-
 void PrintVar(framework::Scope* scope, const std::string& var_name,
               const std::string& print_info) {
   framework::Variable* var = scope->FindVar(var_name);
@@ -52,26 +34,11 @@ void PrintVar(framework::Scope* scope, const std::string& var_name,
     return;
   }
 
-  framework::LoDTensor printed_tensor;
-  printed_tensor.set_lod(tensor->lod());
-  printed_tensor.Resize(tensor->dims());
-  if (platform::is_cpu_place(tensor->place())) {
-    printed_tensor.ShareDataWith(*tensor);
-  } else {
-    platform::CPUPlace place;
-    framework::TensorCopy(*tensor, place, &printed_tensor);
-  }
-
-#define PrintLoDTensorCallback(cpp_type, proto_type)                    \
-  do {                                                                  \
-    if (tensor->type() == proto_type) {                                 \
-      print_lod_tensor<cpp_type>(var_name, printed_tensor, print_info); \
-      return;                                                           \
-    }                                                                   \
-  } while (0)
-
-  _ForEachDataType_(PrintLoDTensorCallback);
-  VLOG(1) << "PrintVar: unrecognized data type:" << printed_tensor.type();
+  std::ostringstream sstream;
+  sstream << print_info << "\t";
+  sstream << var_name << "\t";
+  sstream << *tensor << "\t";
+  std::cout << sstream.str() << std::endl;
 }
 
 } // end namespace platform
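`PrintVar` keeps only the tagging logic; the copy-to-CPU and dtype dispatch it used to duplicate now come for free from `operator<<`. A sketch of the stdout line it produces, reconstructed in Python from the code above (`batch_0` and `fc_0.w_0` are hypothetical tag and variable names):

```python
import numpy as np
import paddle.fluid as fluid

# PrintVar writes: print_info \t var_name \t <tensor text> \t
# str(t) below goes through the same LoDTensor operator<< as the C++ side.
t = fluid.core.LoDTensor()
t.set(np.arange(6, dtype='float32').reshape(2, 3), fluid.CPUPlace())

print_info, var_name = "batch_0", "fc_0.w_0"  # hypothetical values
print(print_info + "\t" + var_name + "\t" + str(t) + "\t")
```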
15 changes: 13 additions & 2 deletions paddle/fluid/pybind/pybind.cc
@@ -59,6 +59,7 @@ limitations under the License. */
 #include "paddle/fluid/pybind/imperative.h"
 #include "paddle/fluid/pybind/inference_api.h"
 #include "paddle/fluid/pybind/ir.h"
+
 #ifndef _WIN32
 #include "paddle/fluid/pybind/nccl_wrapper_py.h"
 #endif
@@ -390,7 +391,12 @@ PYBIND11_MODULE(core, m) {
       .def("_get_double_element", TensorGetElement<double>)
       .def("_place", [](Tensor &self) { return self.place(); })
       .def("_dtype", [](Tensor &self) { return self.type(); })
-      .def("__getitem__", PySliceTensor, py::return_value_policy::reference);
+      .def("__getitem__", PySliceTensor, py::return_value_policy::reference)
+      .def("__str__", [](const Tensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
 
   py::class_<LoDTensor, Tensor>(m, "LoDTensor", R"DOC(
 LoDTensor is a Tensor with optional LoD information.
@@ -609,7 +615,12 @@
 
         Returns:
             out (Tensor): new Tensor(NOT LoDTensor).
-        )DOC");
+        )DOC")
+      .def("__str__", [](const LoDTensor &self) {
+        std::stringstream ostr;
+        ostr << self;
+        return ostr.str();
+      });
 
   py::class_<SelectedRows>(m, "SelectedRows")
       .def("__init__",
Expand Down
5 changes: 3 additions & 2 deletions python/paddle/fluid/framework.py
@@ -564,8 +564,9 @@ def to_string(self, throw_on_error, with_details=False):
         """
         if in_dygraph_mode():
             # TODO(panyx0718): add more dygraph debug info.
-            return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
-                                                     self.shape)
+            return 'name %s, dtype: %s shape: %s %s' % (
+                self.name, self.dtype, self.shape,
+                str(self._ivar.value().get_tensor()))
 
         assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                                bool)
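In dygraph mode a `Variable`'s debug string now appends the live tensor contents through the same binding. A minimal sketch, assuming the 1.x dygraph API (`fluid.dygraph.guard` and `to_variable`):

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    v = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
    # to_string appends str(self._ivar.value().get_tensor()), i.e. the
    # dim/layout/dtype/data block, after name, dtype and shape.
    print(v.to_string(throw_on_error=True))
```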
18 changes: 18 additions & 0 deletions python/paddle/fluid/tests/test_lod_tensor.py
@@ -15,6 +15,7 @@
 from __future__ import print_function
 
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
 import numpy as np
 import unittest
@@ -96,6 +97,23 @@ def test_create_random_int_lodtensor(self):
                                              recursive_seq_lens)
         self.assertEqual(tensor.shape(), [10, 1])
 
+    def test_print_lodtensor(self):
+        shape = [1]
+        recursive_seq_lens = [[2, 3, 5]]
+        dict_size = 100
+        low = 0
+        high = dict_size - 1
+        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                             fluid.CPUPlace(), low, high)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+
+        if core.is_compiled_with_cuda():
+            gtensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                                  fluid.CUDAPlace(0), low, high)
+            print(gtensor)
+            self.assertTrue(isinstance(str(gtensor), str))
+
 
 if __name__ == '__main__':
     unittest.main()
20 changes: 20 additions & 0 deletions python/paddle/fluid/tests/unittests/test_tensor.py
@@ -236,6 +236,26 @@ def test_sliece_tensor(self):
             place = core.CUDAPlace(0)
             self.run_sliece_tensor(place)
 
+    def test_print_tensor(self):
+        scope = core.Scope()
+        var = scope.var("test_tensor")
+        place = core.CPUPlace()
+        tensor = var.get_tensor()
+        tensor._set_dims([10, 10])
+        tensor._alloc_int(place)
+        tensor_array = numpy.array(tensor)
+        self.assertEqual((10, 10), tensor_array.shape)
+        tensor_array[0, 0] = 1
+        tensor_array[2, 2] = 2
+        tensor.set(tensor_array, place)
+        print(tensor)
+        self.assertTrue(isinstance(str(tensor), str))
+
+        if core.is_compiled_with_cuda():
+            tensor.set(tensor_array, core.CUDAPlace(0))
+            print(tensor)
+            self.assertTrue(isinstance(str(tensor), str))
+
 
 if __name__ == '__main__':
     unittest.main()
20 changes: 19 additions & 1 deletion python/paddle/fluid/tests/unittests/test_variable.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_
+from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_, in_dygraph_mode
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import numpy as np
@@ -145,6 +145,24 @@ def test_slice(self):
         if core.is_compiled_with_cuda():
             self._test_slice(core.CUDAPlace(0))
 
+    def _tostring(self):
+        b = default_main_program().current_block()
+        w = b.create_var(dtype="float64", lod_level=0)
+        print(w)
+        self.assertTrue(isinstance(str(w), str))
+
+        if core.is_compiled_with_cuda():
+            wc = b.create_var(dtype="int", lod_level=0)
+            print(wc)
+            self.assertTrue(isinstance(str(wc), str))
+
+    def test_tostring(self):
+        with fluid.dygraph.guard():
+            self._tostring()
+
+        with fluid.program_guard(default_main_program()):
+            self._tostring()
+
 
 if __name__ == '__main__':
     unittest.main()