Skip to content

Commit ab0d9b1

Browse files
Jiewen Tan authored and pytorchmergebot committed
[LT] Support Tensor.is_alias_of
Summary: Tensor.is_alias_of relies on Storage to perform. However, LTCTensorImpl was not implemented with that in mind. This commit adds a fake storage to LazyTensor as a marker to mark LazyTensors that point to the same storage. The reason why it's not done at LTCTensorImpl is that LazyTensor maintains the view ops/alias logic in LazyTensor class instead of relying on TensorImpl to do the check. Test Plan: ./build/bin/test_lazy --gtest_filter=LazyOpsTest.IsAliasOf Pull Request resolved: #75246 Approved by: https://github.com/bdhirsh
1 parent b311f25 commit ab0d9b1

File tree

7 files changed

+52
-19
lines changed

7 files changed

+52
-19
lines changed

c10/core/Allocator.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -217,9 +217,9 @@ struct AllocatorRegisterer {
217217
}
218218
};
219219

220-
#define REGISTER_ALLOCATOR(t, f) \
221-
namespace { \
222-
static AllocatorRegisterer<t> g_allocator_d(f); \
220+
#define REGISTER_ALLOCATOR(t, f) \
221+
namespace { \
222+
static c10::AllocatorRegisterer<t> g_allocator_d(f); \
223223
}
224224

225225
// An interface for reporting thread local memory usage

test/cpp/lazy/test_lazy_ops.cpp

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10721,6 +10721,26 @@ TEST_F(LazyOpsTest, TestLerpScalarOut) {
1072110721
ExpectCounterChanged("lazy::lerp", GetIgnoredCounters());
1072210722
}
1072310723

10724+
TEST_F(LazyOpsTest, IsAliasOf) {
10725+
auto a = torch::empty(4, torch::TensorOptions(torch::kFloat).device(DefaultDevice()));
10726+
auto b = torch::empty(4, torch::TensorOptions(torch::kFloat).device(DefaultDevice()));
10727+
10728+
ForEachDevice([&](const torch::Device& device) {
10729+
auto lazy_a = CopyToDevice(a, device);
10730+
auto lazy_b = CopyToDevice(b, device);
10731+
EXPECT_EQ(!a.is_alias_of(b), !lazy_a.is_alias_of(lazy_b));
10732+
10733+
auto c = a.view({2, 2});
10734+
auto lazy_c = lazy_a.view({2, 2});
10735+
EXPECT_EQ(a.is_alias_of(c), lazy_a.is_alias_of(lazy_c));
10736+
10737+
auto d = c.view({1, 4});
10738+
auto lazy_d = lazy_c.view({1, 4});
10739+
EXPECT_EQ(d.is_alias_of(c), lazy_d.is_alias_of(lazy_c));
10740+
EXPECT_EQ(d.is_alias_of(a), lazy_d.is_alias_of(lazy_a));
10741+
});
10742+
}
10743+
1072410744
#endif // FBCODE_CAFFE2
1072510745

1072610746
} // namespace lazy

torch/csrc/lazy/core/tensor.cpp

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
1-
#include <c10/util/irange.h>
1+
#include <torch/csrc/lazy/core/config.h>
22
#include <torch/csrc/lazy/core/tensor.h>
33

4-
#include <torch/csrc/lazy/core/config.h>
4+
#include <c10/util/irange.h>
55
#include <torch/csrc/lazy/core/helpers.h>
66
#include <torch/csrc/lazy/core/ir_dump_util.h>
77
#include <torch/csrc/lazy/core/lazy_graph_executor.h>
@@ -64,22 +64,24 @@ LazyTensorPtr LazyTensor::Create(std::shared_ptr<Data> data) {
6464
}
6565

6666
LazyTensor::LazyTensor(const at::Tensor& tensor, const BackendDevice& device)
67-
: data_(std::make_shared<Data>(tensor, device)) {}
67+
: LazyTensor(std::make_shared<Data>(tensor, device)) {}
6868

6969
LazyTensor::LazyTensor(BackendDataPtr handle)
70-
: data_(std::make_shared<Data>(handle, handle->device())) {}
70+
: LazyTensor(std::make_shared<Data>(handle, handle->device())) {}
7171

7272
LazyTensor::LazyTensor(Value ir_value, const BackendDevice& device)
73-
: data_(std::make_shared<Data>(std::move(ir_value), device)) {
73+
: LazyTensor(std::make_shared<Data>(std::move(ir_value), device)) {
7474
TryLimitGraphSize();
7575
}
7676

7777
LazyTensor::LazyTensor(
7878
std::shared_ptr<LazyView> view,
7979
const BackendDevice& device)
80-
: data_(std::make_shared<Data>(std::move(view), device)) {}
80+
: LazyTensor(std::make_shared<Data>(std::move(view), device)) {}
8181

82-
LazyTensor::LazyTensor(std::shared_ptr<Data> data) : data_(std::move(data)) {}
82+
LazyTensor::LazyTensor(std::shared_ptr<Data> data)
83+
: data_(std::move(data))
84+
, storage_(c10::Storage({}, 0, c10::DataPtr(nullptr, backendDeviceToAtenDevice(data_->device)))) {}
8385

8486
LazyTensor::Data* LazyTensor::data() const {
8587
TORCH_CHECK(data_ != nullptr, "Trying to access a null cursor");
@@ -346,7 +348,9 @@ std::shared_ptr<LazyView> LazyTensor::CreateView(ViewInfo view_info) const {
346348
}
347349

348350
LazyTensorPtr LazyTensor::CreateViewTensor(ViewInfo view_info) const {
349-
return Create(CreateView(std::move(view_info)), GetDevice());
351+
auto new_tensor = Create(CreateView(std::move(view_info)), GetDevice());
352+
new_tensor->storage_ = Storage();
353+
return new_tensor;
350354
}
351355

352356
at::Tensor LazyTensor::ToTensor(bool detached) {

torch/csrc/lazy/core/tensor.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,11 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
133133
// Applies the queue of operations in preparation for using the data.
134134
void ApplyPendingGraph();
135135

136+
const c10::Storage& Storage() const { return storage_; }
137+
// This is currently only used by outlier view ops such as expand that
138+
// don't go through CreateViewTensor to support Tensor.is_alias_of.
139+
void SetStorage(const c10::Storage& storage) { storage_ = storage; }
140+
136141
private:
137142
LazyTensor(const at::Tensor& tensor, const BackendDevice& device);
138143
LazyTensor(Value ir_value, const BackendDevice& device);
@@ -177,6 +182,12 @@ class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
177182
static int64_t GetNextTensorId();
178183

179184
std::shared_ptr<Data> data_;
185+
// Temporarily used to suport Tensor.is_alias_of().
186+
// This is a fake storage that doesn't store anything.
187+
// Instead it serves as a marker to mark LazyTensors that
188+
// points to the same storage, and thus alias of each other.
189+
// FIXME(alanwaketan): Remove this once we have functionalization (bdhirsh).
190+
c10::Storage storage_;
180191
};
181192

182193
// Utils to convert at::Tensor to LazyTensor, and vice versa.

torch/csrc/lazy/core/tensor_impl.cpp

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
#include <torch/csrc/lazy/core/tensor_impl.h>
22

3+
#include <c10/core/Allocator.h>
34
#include <c10/core/ScalarType.h>
45
#include <c10/core/impl/DeviceGuardImplInterface.h>
56
#include <c10/macros/Macros.h>
@@ -191,10 +192,6 @@ bool LTCTensorImpl::is_contiguous(c10::MemoryFormat _unused) const {
191192
return true;
192193
}
193194

194-
const at::Storage& LTCTensorImpl::storage() const {
195-
TORCH_CHECK(false, "Lazy tensors do not have storage");
196-
}
197-
198195
#endif // C10_DISABLE_TENSORIMPL_EXTENSIBILITY
199196

200197
} // namespace lazy

torch/csrc/lazy/core/tensor_impl.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#pragma once
22

33
#include <ATen/Tensor.h>
4-
#include <c10/core/Storage.h>
54
#include <c10/core/TensorImpl.h>
65

76
#include <torch/csrc/lazy/core/tensor.h>
@@ -44,8 +43,8 @@ class TORCH_API LTCTensorImpl final : public c10::TensorImpl {
4443
int64_t numel() const override;
4544

4645
bool is_contiguous(at::MemoryFormat memory_format) const override;
47-
const at::Storage& storage() const override;
48-
bool has_storage() const override { return false; }
46+
const at::Storage& storage() const override { return tensor_->Storage(); }
47+
bool has_storage() const override { return tensor_->Storage(); }
4948
#endif // C10_DISABLE_TENSORIMPL_EXTENSIBILITY
5049

5150
private:

torch/csrc/lazy/ts_backend/tensor_aten_ops.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,10 +116,12 @@ void as_strided_(torch::lazy::LazyTensorPtr& input, std::vector<int64_t> size,
116116

117117
torch::lazy::LazyTensorPtr expand(const torch::lazy::LazyTensorPtr& input, std::vector<int64_t> size) {
118118
auto input_shape = input->shape();
119-
return torch::lazy::LazyTensor::Create(torch::lazy::MakeNode<torch::lazy::Expand>(
119+
auto output = torch::lazy::LazyTensor::Create(torch::lazy::MakeNode<torch::lazy::Expand>(
120120
input->GetIrValue(),
121121
GetExpandDimensions(input_shape.Get(), std::move(size)),
122122
/*is_scalar_expand=*/false), input->GetDevice());
123+
output->SetStorage(input->Storage());
124+
return output;
123125
}
124126

125127
void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value) {

0 commit comments

Comments (0)