Changes from all commits (26 commits)
df221af  Stop using Type in Python bindings (Jun 19, 2019)
0456097  Update on "Stop using Type in Python bindings" (Jun 19, 2019)
b73a9c6  Update on "Stop using Type in Python bindings" (Jun 19, 2019)
30cf761  Update on "Move backward and set_data off of Type" (Jun 20, 2019)
ce10994  Update on "Move backward and set_data off of Type" (Jun 20, 2019)
86e96a3  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
07b3e8e  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
c9236e3  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
db7b539  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
f979cf2  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
38504e0  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
ae29f78  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
d625561  Update on "Move backward and set_data off of Type" (Jun 21, 2019)
87eb996  Update on "Move backward and set_data off of Type" (Jun 22, 2019)
0b2580c  Update on "Move backward and set_data off of Type" (Jun 24, 2019)
ff87151  Update on "Move backward and set_data off of Type" (Jun 24, 2019)
81913bb  Update on "Move backward and set_data off of Type" (Jun 24, 2019)
f3aa351  Update on "Move backward and set_data off of Type" (Jun 24, 2019)
21adc45  Update on "Move backward and set_data off of Type" (Jun 24, 2019)
297f311  Update on "Move backward and set_data off of Type" (Jun 25, 2019)
1df0016  Update on "Move backward and set_data off of Type" (Jun 25, 2019)
22692e8  Update on "Move backward and set_data off of Type" (Jun 25, 2019)
d27e622  Update on "Move backward and set_data off of Type" (Jun 25, 2019)
0abf2b3  Update on "Move backward and set_data off of Type" (Jun 26, 2019)
0128bf5  Update on "Move backward and set_data off of Type" (Jun 26, 2019)
0bdbe22  Update on "Move backward and set_data off of Type" (Jun 28, 2019)
4 changes: 2 additions & 2 deletions aten/src/ATen/CPUTypeDefault.h
@@ -4,8 +4,8 @@
 namespace at {
 
 struct CAFFE2_API CPUTypeDefault : public TypeDefault {
-  CPUTypeDefault(TensorTypeId type_id, bool is_variable, bool is_undefined)
-    : TypeDefault(type_id, is_variable, is_undefined) {}
+  CPUTypeDefault()
+    : TypeDefault() {}
 };
 
 } // namespace at
18 changes: 1 addition & 17 deletions aten/src/ATen/UndefinedType.cpp
@@ -4,10 +4,7 @@
 namespace at {
 
 UndefinedType::UndefinedType()
-  : TypeDefault(UndefinedTensorId(), /*is_variable=*/false, /*is_undefined=*/true) {}
-Backend UndefinedType::backend() const {
-  return Backend::Undefined;
-}
+  : TypeDefault() {}
 
 const char * UndefinedType::toString() const {
   return "UndefinedType";
@@ -17,17 +14,4 @@ TypeID UndefinedType::ID() const {
   return TypeID::Undefined;
 }
 
-Type & UndefinedType::toBackend(Backend b) const {
-  if (b == Backend::Undefined) {
-    return TypeDefault::toBackend(b);
-  }
-  AT_ERROR("toBackend not implemented for UndefinedType to non-UndefinedType");
-}
-Type & UndefinedType::toScalarType(ScalarType s) const {
-  if (s == ScalarType::Undefined) {
-    return TypeDefault::toScalarType(s);
-  }
-  AT_ERROR("toScalarType not implemented for UndefinedType to non-UndefinedType");
-}
-
 }
3 changes: 0 additions & 3 deletions aten/src/ATen/UndefinedType.h
@@ -13,10 +13,7 @@ namespace at {
 
 struct UndefinedType final : public TypeDefault {
   explicit UndefinedType();
-  virtual Backend backend() const override;
   virtual const char * toString() const override;
-  virtual Type & toBackend(Backend b) const override;
-  virtual Type & toScalarType(ScalarType s) const override;
   virtual TypeID ID() const override;
 };
 
10 changes: 2 additions & 8 deletions aten/src/ATen/core/Tensor.h
@@ -349,19 +349,13 @@ class CAFFE2_API Tensor {
     return impl_->grad();
   }
 
-  void set_data(Tensor new_data);
-
-  /// Computes the gradient of current tensor w.r.t. graph leaves.
-  void backward(
-      c10::optional<Tensor> gradient = c10::nullopt,
-      bool keep_graph = false,
-      bool create_graph = false);
-
   // STOP. Thinking of adding a method here, which only makes use
   // of other ATen methods? Define it in native_functions.yaml.
 
   //example
   //Tensor * add(Tensor & b);
+  void backward(const Tensor & gradient={}, bool keep_graph=false, bool create_graph=false) const;
Review thread on the new backward signature:

[Contributor] I think the original signature was intentionally optional<Tensor>, to make it clear that passing nullopt is valid (it's not obviously valid for other cases). Does the codegen choke on this?

[Contributor] We've been planning to make Tensor? translate to optional<Tensor> for a while now -- maybe now is a good time to do it?

[Author] Yeah, this doesn't work in the codegen yet.
+  void set_data(const Tensor & new_data) const;
   Tensor abs() const;
   Tensor & abs_();
   Tensor acos() const;
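A note on the thread above: since the codegen cannot yet emit c10::optional<Tensor>, the new signature uses a default-constructed (undefined) Tensor as the stand-in for nullopt. The sketch below illustrates that convention; backward_impl is a hypothetical callee for illustration, not code from this PR.

#include <ATen/ATen.h>

// Hypothetical callee: an undefined Tensor plays the role of c10::nullopt.
void backward_impl(const at::Tensor& self, const at::Tensor& gradient) {
  if (!gradient.defined()) {
    // The caller passed `{}`; synthesize the implicit gradient of ones,
    // as autograd does for scalar outputs.
    at::Tensor implicit = at::ones_like(self);
    // ... run the backward pass with `implicit` ...
  } else {
    // ... run the backward pass with `gradient` ...
  }
}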
17 changes: 7 additions & 10 deletions aten/src/ATen/core/TensorMethods.h
@@ -56,18 +56,15 @@ inline TensorOptions Tensor::options() const {
       .is_variable(is_variable());
 }
 
-inline void Tensor::backward(
-    c10::optional<Tensor> gradient,
-    bool keep_graph,
-    bool create_graph) {
-  dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph);
+// all static inline to allow for inlining of the non-dynamic part of dispatch
+inline void Tensor::backward(const Tensor & gradient, bool keep_graph, bool create_graph) const {
+  static auto table = globalATenDispatch().getOpTable("aten::backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> void");
+  return table->getOp<void (const Tensor &, const Tensor &, bool, bool)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, gradient, keep_graph, create_graph);
[Contributor] Cool.
 }
 
-inline void Tensor::set_data(Tensor new_data) {
-  dispatch_type().set_data(*this, new_data);
+inline void Tensor::set_data(const Tensor & new_data) const {
+  static auto table = globalATenDispatch().getOpTable("aten::set_data(Tensor(a!) self, Tensor new_data) -> void");
+  return table->getOp<void (const Tensor &, const Tensor &)>(tensorTypeIdToBackend(type_id()), is_variable())(*this, new_data);
 }
 
-// all static inline to allow for inlining of the non-dynamic part of dispatch
 inline Tensor Tensor::abs() const {
   static auto table = globalATenDispatch().getOpTable("aten::abs(Tensor self) -> Tensor");
   return table->getOp<Tensor (const Tensor &)>(tensorTypeIdToBackend(type_id()), is_variable())(*this);
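The "all static inline" comment explains the design: each generated method performs one schema-string lookup, caches the resulting table in a function-local static, and every later call reduces to an array index keyed on (Backend, is_variable) plus an indirect call. Below is a minimal standalone sketch of that shape; OpTable, Dispatcher, and globalDispatch are simplified stand-ins, not the actual ATen types.

#include <string>
#include <unordered_map>

enum class Backend { CPU, CUDA, NumBackends };

// Simplified model of the getOpTable/getOp pattern in the methods above.
struct OpTable {
  using ErasedFn = void (*)();
  // One type-erased function pointer per (backend, is_variable) slot.
  ErasedFn fns[static_cast<int>(Backend::NumBackends)][2] = {};

  template <typename FuncType>
  FuncType* getOp(Backend b, bool is_variable) const {
    // Cast back to the typed signature the call site supplies.
    return reinterpret_cast<FuncType*>(fns[static_cast<int>(b)][is_variable]);
  }
};

struct Dispatcher {
  std::unordered_map<std::string, OpTable> tables;
  OpTable* getOpTable(const std::string& schema) { return &tables[schema]; }
};

Dispatcher& globalDispatch() {
  static Dispatcher d;
  return d;
}

// Mirrors the generated methods: the static means the string hashing and
// map probe happen once per call site; steady state is index + call.
float dispatch_example(Backend b, bool is_variable, float x) {
  static OpTable* table = globalDispatch().getOpTable("demo::square(float x) -> float");
  auto* fn = table->getOp<float (float)>(b, is_variable);
  return fn ? fn(x) : 0.0f;
}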
77 changes: 1 addition & 76 deletions aten/src/ATen/core/Type.h
@@ -70,86 +70,11 @@ enum class TypeID {
 };
 
 struct CAFFE2_API Type {
-  explicit Type(TensorTypeId type_id, bool is_variable, bool is_undefined)
-    : type_id_(type_id), is_variable_(is_variable), is_undefined_(is_undefined) {}
+  explicit Type() {}
 
   virtual ~Type() {}
-  virtual Backend backend() const = 0;
-  Layout layout() const noexcept { return layout_from_backend(backend()); }
-  virtual bool is_cuda() const = 0;
-  virtual bool is_hip() const = 0;
-  virtual bool is_sparse() const = 0;
-  virtual bool is_quantized() const = 0;
-  virtual bool is_distributed() const = 0;
-  bool is_variable() const noexcept { return is_variable_; }
-  bool is_undefined() const noexcept { return is_undefined_; }
   virtual const char * toString() const = 0;
-  virtual Type & toBackend(Backend b) const = 0;
-  virtual Type & toScalarType(ScalarType s) const = 0;
-  Type & toSparse() const {
-    return this->toBackend(at::toSparse(this->backend()));
-  }
-  Type & toDense() const {
-    return this->toBackend(at::toDense(this->backend()));
-  }
-  Type & cpu() const {
-    return this->toBackend(at::backendToCPU(this->backend()));
-  }
-  Type & cuda() const {
-    return this->toBackend(at::backendToCUDA(this->backend()));
-  }
-  Type & hip() const {
-    return this->toBackend(at::backendToHIP(this->backend()));
-  }
   // contiguous IDs for all types in the system
   // for external dispatch
   virtual TypeID ID() const = 0;
-
-  // New-style TensorTypeId that supports open registration.
-  TensorTypeId type_id() const { return type_id_; }
-
-  // NB: This will return DeviceType::CPU for Backend::SparseCPU
-  DeviceType device_type() const {
-    return backendToDeviceType(backend());
-  }
-
-  virtual void backward(
-      Tensor& self,
-      c10::optional<Tensor> gradient,
-      bool keep_graph,
-      bool create_graph) const = 0;
-  virtual void set_data(Tensor & self, Tensor new_data) const = 0;
-
-  bool operator==(const Type& other) const {
-    return this == &other;
-  }
-  bool operator!=(const Type& other) const {
-    return this != &other;
-  }
-
-  TensorOptions options(ScalarType s, int16_t device_index = -1) const {
-    return TensorOptions().dtype(s)
-        .device(device_type(), device_index)
-        .layout(layout())
-        .is_variable(is_variable());
-  }
-
-  /// Constructs the `TensorOptions` from a type and a Device. Asserts that
-  /// the device type matches the device type of the type.
-  TensorOptions options(ScalarType s, c10::optional<Device> device_opt) const {
-    if (!device_opt.has_value()) {
-      return options(s, -1);
-    } else {
-      Device device = device_opt.value();
-      AT_ASSERT(device.type() == device_type());
-      return options(s, device.index());
-    }
-  }
-
-protected:
-  TensorTypeId type_id_;
-  bool is_variable_;
-  bool is_undefined_;
 };
 
 } // namespace at
4 changes: 2 additions & 2 deletions aten/src/ATen/cuda/CUDATypeDefault.h
@@ -5,8 +5,8 @@
 namespace at {
 
 struct AT_CUDA_API CUDATypeDefault : public TypeDefault {
-  CUDATypeDefault(TensorTypeId type_id, bool is_variable, bool is_undefined)
-    : TypeDefault(type_id, is_variable, is_undefined) {}
+  CUDATypeDefault()
+    : TypeDefault() {}
 };
 
 } // namespace at
16 changes: 16 additions & 0 deletions aten/src/ATen/native/VariableMethodStubs.cpp
@@ -0,0 +1,16 @@
+#include <ATen/ATen.h>
+#include <ATen/NativeFunctions.h>
+
+namespace at {
+namespace native {
+
+void backward(const Tensor& self, const Tensor& gradient, bool keep_graph, bool create_graph) {
+  AT_ERROR("backward is not implemented for Tensor");
+}
+
+void set_data(const Tensor& self, const Tensor& new_data) {
[Contributor] The codegen for this looks wrong, because this is really an in-place method, so you'd expect self to be Tensor &, not const Tensor &; probably because the name doesn't end in _. I can't think off the top of my head why this would cause a problem, just something to note.
AT_ERROR("set_data is not implemented for Tensor");
}

} // namespace native
} // namespace at
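On the reviewer's point: const-ness here constrains the Tensor handle, not the data. Tensor is a reference-counted pointer to a TensorImpl, so a const Tensor& only prevents rebinding the handle; the storage behind it stays mutable, which is why an in-place op like set_data can compile with this signature. A small illustration of the handle semantics (sketch, not PR code):

#include <ATen/ATen.h>

// Even though `self` is a const reference, the data it refers to can be
// mutated: copying the handle shares the same underlying TensorImpl.
void mutate_through_const_ref(const at::Tensor& self) {
  at::Tensor alias = self;  // cheap handle copy, same TensorImpl
  alias.zero_();            // zeroes the data that `self` observes
}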
6 changes: 6 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -29,6 +29,12 @@
 - func: _cast_Half(Tensor self, bool non_blocking=False) -> Tensor
   variants: function
 
+- func: backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> void
+  variants: method
+
+- func: set_data(Tensor(a!) self, Tensor new_data) -> void
+  variants: method
+
 - func: _cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)
   dispatch:
     CUDA: _cudnn_ctc_loss
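Assuming the codegen treats these like the neighboring entries, the two schemas above are what produce the Tensor::backward and Tensor::set_data methods shown earlier in TensorMethods.h. A short sketch of the resulting caller-facing surface; on plain ATen tensors these calls hit the AT_ERROR stubs from VariableMethodStubs.cpp, while the autograd layer registers the real implementations for variables.

#include <ATen/ATen.h>

// Calls enabled by the two schema entries above.
void example(const at::Tensor& loss, at::Tensor& w, const at::Tensor& pretrained) {
  loss.backward();         // gradient=None, keep_graph=False, create_graph=False
  w.set_data(pretrained);  // in-place on self, per the (a!) annotation
}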
5 changes: 1 addition & 4 deletions aten/src/ATen/templates/SparseTypeDerived.cpp
@@ -29,10 +29,7 @@
 namespace at {
 
 ${Type}::${Type}()
-  : ${DeviceType}TypeDefault(${Backend}TensorId(), /*is_variable=*/false, /*is_undefined=*/false) {}
-Backend ${Type}::backend() const {
-  return Backend::${Backend};
-}
+  : ${DeviceType}TypeDefault() {}
 
 const char * ${Type}::toString() const {
   return "${Type}";
8 changes: 0 additions & 8 deletions aten/src/ATen/templates/Tensor.h
@@ -349,14 +349,6 @@ class CAFFE2_API Tensor {
     return impl_->grad();
   }
 
-  void set_data(Tensor new_data);
-
-  /// Computes the gradient of current tensor w.r.t. graph leaves.
-  void backward(
-      c10::optional<Tensor> gradient = c10::nullopt,
-      bool keep_graph = false,
-      bool create_graph = false);
-
   // STOP. Thinking of adding a method here, which only makes use
   // of other ATen methods? Define it in native_functions.yaml.
 
11 changes: 0 additions & 11 deletions aten/src/ATen/templates/TensorMethods.h
@@ -56,17 +56,6 @@ inline TensorOptions Tensor::options() const {
       .is_variable(is_variable());
 }
 
-inline void Tensor::backward(
-    c10::optional<Tensor> gradient,
-    bool keep_graph,
-    bool create_graph) {
-  dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph);
-}
-
-inline void Tensor::set_data(Tensor new_data) {
-  dispatch_type().set_data(*this, new_data);
-}
-
 // all static inline to allow for inlining of the non-dynamic part of dispatch
 ${tensor_method_definitions}
 
77 changes: 1 addition & 76 deletions aten/src/ATen/templates/Type.h
@@ -63,86 +63,11 @@ enum class TypeID {
 };
 
 struct CAFFE2_API Type {
-  explicit Type(TensorTypeId type_id, bool is_variable, bool is_undefined)
-    : type_id_(type_id), is_variable_(is_variable), is_undefined_(is_undefined) {}
+  explicit Type() {}
 
   virtual ~Type() {}
-  virtual Backend backend() const = 0;
-  Layout layout() const noexcept { return layout_from_backend(backend()); }
-  virtual bool is_cuda() const = 0;
-  virtual bool is_hip() const = 0;
-  virtual bool is_sparse() const = 0;
-  virtual bool is_quantized() const = 0;
-  virtual bool is_distributed() const = 0;
-  bool is_variable() const noexcept { return is_variable_; }
-  bool is_undefined() const noexcept { return is_undefined_; }
   virtual const char * toString() const = 0;
-  virtual Type & toBackend(Backend b) const = 0;
-  virtual Type & toScalarType(ScalarType s) const = 0;
-  Type & toSparse() const {
-    return this->toBackend(at::toSparse(this->backend()));
-  }
-  Type & toDense() const {
-    return this->toBackend(at::toDense(this->backend()));
-  }
-  Type & cpu() const {
-    return this->toBackend(at::backendToCPU(this->backend()));
-  }
-  Type & cuda() const {
-    return this->toBackend(at::backendToCUDA(this->backend()));
-  }
-  Type & hip() const {
-    return this->toBackend(at::backendToHIP(this->backend()));
-  }
   // contiguous IDs for all types in the system
   // for external dispatch
   virtual TypeID ID() const = 0;
-
-  // New-style TensorTypeId that supports open registration.
-  TensorTypeId type_id() const { return type_id_; }
-
-  // NB: This will return DeviceType::CPU for Backend::SparseCPU
-  DeviceType device_type() const {
-    return backendToDeviceType(backend());
-  }
-
-  virtual void backward(
-      Tensor& self,
-      c10::optional<Tensor> gradient,
-      bool keep_graph,
-      bool create_graph) const = 0;
-  virtual void set_data(Tensor & self, Tensor new_data) const = 0;
-
-  bool operator==(const Type& other) const {
-    return this == &other;
-  }
-  bool operator!=(const Type& other) const {
-    return this != &other;
-  }
-
-  TensorOptions options(ScalarType s, int16_t device_index = -1) const {
-    return TensorOptions().dtype(s)
-        .device(device_type(), device_index)
-        .layout(layout())
-        .is_variable(is_variable());
-  }
-
-  /// Constructs the `TensorOptions` from a type and a Device. Asserts that
-  /// the device type matches the device type of the type.
-  TensorOptions options(ScalarType s, c10::optional<Device> device_opt) const {
-    if (!device_opt.has_value()) {
-      return options(s, -1);
-    } else {
-      Device device = device_opt.value();
-      AT_ASSERT(device.type() == device_type());
-      return options(s, device.index());
-    }
-  }
-
-protected:
-  TensorTypeId type_id_;
-  bool is_variable_;
-  bool is_undefined_;
 };
 
 } // namespace at
19 changes: 0 additions & 19 deletions aten/src/ATen/templates/TypeDefault.cpp
@@ -19,25 +19,6 @@
 
 namespace at {
 
-void TypeDefault::backward(
-    Tensor& self,
-    c10::optional<Tensor> gradient,
-    bool keep_graph,
-    bool create_graph) const {
-  AT_ERROR("backward is not implemented for Tensor");
-}
-
-void TypeDefault::set_data(Tensor & self, Tensor new_data) const {
-  AT_ERROR("set_data is not implemented for Tensor");
-}
-
-Type & TypeDefault::toBackend(Backend b) const {
-  return at::globalContext().getNonVariableType(b, ScalarType::Undefined);
-}
-Type & TypeDefault::toScalarType(ScalarType s) const {
-  return at::globalContext().getNonVariableType(backend(),s);
-}
-
 ${type_method_definitions}
 
 static auto& registerer = globalATenDispatch()