Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 0 additions & 9 deletions aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,15 +50,6 @@ Tensor new_with_itensor_mkldnn(ideep::tensor&& it, const TensorOptions& options)
std::vector<int64_t>(dims.begin(), dims.end()));
}

// Construct an uninitialized MKL-DNN (f32) tensor with the given sizes.
// NOTE(review): this helper was removed in this change in favor of
// `empty_mkldnn` (see TensorFactories.cpp), which has an identical body.
Tensor new_with_sizes_mkldnn(IntArrayRef sizes, const TensorOptions& options) {
// NOTE: int32_t dims from ideep::tensor but sizes needs int64_t
// TODO: support int64_t dims in ideep::tensor to avoid extra conversion
ideep::tensor::dims dst_dims (sizes.begin(), sizes.end());
ideep::tensor it;
// Allocate backing storage through the ATen-aware allocator shim.
it.resize<AllocForMKLDNN>(dst_dims, ideep::tensor::data_type::f32);
// Wrap the ideep tensor in an ATen Tensor carrying `options`.
return new_with_itensor_mkldnn(std::move(it), options);
}

ideep::tensor& itensor_from_mkldnn(const MKLDNNTensor& mkldnn_tensor) {
AT_ASSERTM(mkldnn_tensor.is_mkldnn(),
"mkldnn_to_dense expects MKL-DNN tensor input");
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/native/mkldnn/MKLDNNCommon.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@ struct AllocForMKLDNN {
// Construct aten MKL-DNN tensor given an ideep tensor
Tensor new_with_itensor_mkldnn(ideep::tensor&& it, const TensorOptions& options);

// Construct aten MKL-DNN tensor given `sizes` for allocation
Tensor new_with_sizes_mkldnn(IntArrayRef sizes, const TensorOptions& options);

// Retrieve `ideep::tensor` from MKL-DNN tensor
ideep::tensor& itensor_from_mkldnn(const Tensor& mkldnn_tensor);

Expand Down
4 changes: 3 additions & 1 deletion aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,11 @@ Tensor dense_to_mkldnn(const Tensor& cpu_tensor) {
"dense_to_mkldnn expects dense CPU tensor input");
AT_ASSERTM(cpu_tensor.scalar_type() == ScalarType::Float,
"dense_to_mkldnn expects float tensor input");
AT_ASSERTM(cpu_tensor.dim() <= 5,
"Can't convert cpu tensor with the number of dimensions > 5");
// TODO: consider to convert non-contiguous tensor to `ideep::tensor` directly.
auto cpu_tensor_cont = cpu_tensor.contiguous();
Tensor mkldnn_tensor = new_with_sizes_mkldnn(cpu_tensor_cont.sizes(), cpu_tensor_cont.options());
Tensor mkldnn_tensor = empty_mkldnn(cpu_tensor_cont.sizes(), cpu_tensor_cont.options());
ideep::tensor& dtensor = itensor_from_mkldnn(mkldnn_tensor);
dtensor.reorder_from(dtensor.get_dims(),
ideep::tensor::data_type::f32,
Expand Down
24 changes: 24 additions & 0 deletions aten/src/ATen/native/mkldnn/TensorFactories.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#include <ATen/native/mkldnn/MKLDNNCommon.h>

namespace at { namespace native {

#if AT_MKLDNN_ENABLED()

// Allocate an uninitialized MKL-DNN tensor of the requested sizes.
// Only f32 is supported here; the result wraps an ideep::tensor whose
// storage comes from the ATen-aware allocator.
Tensor empty_mkldnn(IntArrayRef sizes, const TensorOptions& options) {
  // NOTE: int32_t dims from ideep::tensor but sizes needs int64_t
  // TODO: support int64_t dims in ideep::tensor to avoid extra conversion
  ideep::tensor::dims dims(sizes.begin(), sizes.end());
  ideep::tensor buffer;
  buffer.resize<AllocForMKLDNN>(dims, ideep::tensor::data_type::f32);
  return new_with_itensor_mkldnn(std::move(buffer), options);
}

#else

// Fallback used when ATen is compiled without MKL-DNN support:
// unconditionally raises, so callers can never obtain an MKL-DNN tensor
// in such a build.
Tensor empty_mkldnn(IntArrayRef sizes, const TensorOptions& options) {
AT_ERROR("empty_mkldnn: MKL-DNN build is disabled");
}

#endif // AT_MKLDNN_ENABLED()

}}
1 change: 1 addition & 0 deletions aten/src/ATen/native/native_functions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -688,6 +688,7 @@
dispatch:
CPU: empty_cpu
CUDA: empty_cuda
MkldnnCPU: empty_mkldnn
SparseCPU: empty_sparse
SparseCUDA: empty_sparse

Expand Down
7 changes: 6 additions & 1 deletion test/test_mkldnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def test_unsupported(self):
with self.assertRaises(RuntimeError) as context:
torch.randn(1, 2, 3, 4, dtype=torch.float, device=torch.device('cuda')).to_mkldnn()
# some factory functions
for creator in [torch.empty, torch.ones, torch.zeros, torch.randn, torch.rand]:
for creator in [torch.ones, torch.zeros, torch.randn, torch.rand]:
with self.assertRaises(RuntimeError) as context:
creator(1, 2, 3, 4, dtype=torch.float, device=torch.device('cpu'), layout=torch._mkldnn)

Expand Down Expand Up @@ -289,6 +289,11 @@ def test_set_data_tensorimpl_type(self):
with self.assertRaisesRegex(RuntimeError, 'different types of TensorImpl'):
x.data = x_mkldnn

def test_empty(self):
    # torch.empty with the MKL-DNN layout should produce a tensor whose
    # dense conversion matches the size and dtype of a plain dense empty().
    shape = (4, 5, 2, 3)
    dense = torch.empty(*shape, dtype=torch.float32)
    mkldnn = torch.empty(*shape, dtype=torch.float32, layout=torch._mkldnn)
    converted = mkldnn.to_dense()
    self.assertEqual(dense.size(), converted.size())
    self.assertEqual(dense.dtype, converted.dtype)

if __name__ == '__main__':
run_tests()