1,427 changes: 714 additions & 713 deletions aten/src/ATen/core/OpsAlreadyMovedToC10.cpp

Large diffs are not rendered by default.

1,875 changes: 750 additions & 1,125 deletions aten/src/ATen/core/TensorMethods.h

Large diffs are not rendered by default.

10 changes: 2 additions & 8 deletions aten/src/ATen/function_wrapper.py
@@ -113,16 +113,10 @@ def TypedDict(name, attrs, total=True): # type: ignore
""")

DEFAULT_UNBOXEDONLY_FUNCTION_REGISTRATION = CodeTemplate("""\
-.op(torch::RegisterOperators::options()
-.schema("${schema_string}")
-.impl_unboxedOnlyCatchAllKernel<${return_type} (${formals_types}), &TypeDefault::${api_name}>()
-.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
+.registerOp<${return_type} (${formals_types})>(TensorTypeId::UndefinedTensorId, "${schema_string}", &TypeDefault::${api_name})
""")
BACKEND_UNBOXEDONLY_FUNCTION_REGISTRATION = CodeTemplate("""\
-.op(torch::RegisterOperators::options()
-.schema("${schema_string}")
-.impl_unboxedOnlyKernel<${return_type} (${formals_types}), &${Type}::${api_name}>(TensorTypeId::${Backend}TensorId)
-.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
+.registerOp<${return_type} (${formals_types})>(TensorTypeId::${Backend}TensorId, "${schema_string}", &${Type}::${api_name})
""")
DEFAULT_FUNCTION_REGISTRATION = CodeTemplate("""\
.op(torch::RegisterOperators::options()
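
For context, here is a rough, hand-written sketch of what the old and new unboxed-only templates expand to for a single operator. The operator name and schema string below are invented for illustration; the real values are generated per-op from native_functions.yaml.

```
// Hypothetical operator "aten::my_op" -- invented for this sketch.

// Old expansion (torch::RegisterOperators), one link of the registration chain:
.op(torch::RegisterOperators::options()
  .schema("aten::my_op(Tensor self) -> Tensor")
  .impl_unboxedOnlyCatchAllKernel<Tensor (const Tensor &), &TypeDefault::my_op>()
  .aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))

// New expansion (globalATenDispatch), the same operator:
.registerOp<Tensor (const Tensor &)>(
    TensorTypeId::UndefinedTensorId,
    "aten::my_op(Tensor self) -> Tensor",
    &TypeDefault::my_op)
```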
3 changes: 3 additions & 0 deletions aten/src/ATen/native/README.md
@@ -303,6 +303,9 @@ set of reviewers.

### `use_c10_dispatcher`

+Note: this functionality is temporarily disabled because of code size bloat;
+the annotations are ignored.

```
use_c10_dispatcher: 'no'
use_c10_dispatcher: 'unboxed_only'
4 changes: 4 additions & 0 deletions aten/src/ATen/native_parse.py
@@ -415,6 +415,10 @@ def run(paths):
declaration['supports_named_tensor'] = func.get('supports_named_tensor', False)
declaration['use_c10_dispatcher'] = func.get('use_c10_dispatcher', 'no')
assert declaration['use_c10_dispatcher'] in ['no', 'unboxed_only', 'full']
+# TODO: remove this override once code size issues are fixed (see
+# https://github.com/pytorch/pytorch/pull/26821 for more context)
+declaration['use_c10_dispatcher'] = 'no'

declaration['category_override'] = func.get('category_override', '')
declaration['arguments'] = func.get('arguments', arguments)
declaration['type_method_definition_dispatch'] = func.get('dispatch', declaration['name'])
3 changes: 1 addition & 2 deletions aten/src/ATen/templates/SparseTypeDerived.cpp
@@ -17,7 +17,6 @@
#include <c10/util/Half.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Optional.h>
-#include <ATen/core/op_registration/op_registration.h>
#include <ATen/core/EnableNamedTensor.h>

#include <cstddef>
@@ -33,7 +32,7 @@ namespace at {
${type_derived_method_definitions}

#ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
+static auto& registerer = globalATenDispatch()
${function_registrations};
#endif
}
3 changes: 1 addition & 2 deletions aten/src/ATen/templates/TypeDefault.cpp
@@ -14,15 +14,14 @@
#include <ATen/DeviceGuard.h>
#include <ATen/SparseTensorUtils.h>
#include <ATen/core/ATenDispatch.h>
-#include <ATen/core/op_registration/op_registration.h>
#include <ATen/core/EnableNamedTensor.h>

namespace at {

${type_method_definitions}

#ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
+static auto& registerer = globalATenDispatch()
${function_registrations};
#endif
}
3 changes: 1 addition & 2 deletions aten/src/ATen/templates/TypeDerived.cpp
@@ -29,7 +29,6 @@
#include <utility>

#include <ATen/Config.h>
-#include <ATen/core/op_registration/op_registration.h>
$extra_cuda_headers
$legacy_th_headers

@@ -45,7 +44,7 @@ Tensor * ${Type}::add(Tensor & a, Tensor & b) {
${type_derived_method_definitions}

#ifndef USE_STATIC_DISPATCH
-static auto registerer = torch::RegisterOperators()
+static auto& registerer = globalATenDispatch()
${function_registrations};
#endif
}
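
Putting the template change and the function_wrapper.py change together, the registration block emitted into a generated backend type file would read roughly as sketched below. This is an illustration only: the operator names, schema strings, and the use of CPUType/CPUTensorId as the expansion of ${Type}/${Backend} are assumptions for the example, not the literal generated output.

```
// Sketch of the registration block in a generated backend file (e.g. CPUType.cpp).
// Operators and schemas are invented for illustration.
#ifndef USE_STATIC_DISPATCH
static auto& registerer = globalATenDispatch()
  .registerOp<Tensor (const Tensor &)>(
      TensorTypeId::CPUTensorId,
      "aten::my_op(Tensor self) -> Tensor",
      &CPUType::my_op)
  .registerOp<Tensor & (Tensor &, const Tensor &)>(
      TensorTypeId::CPUTensorId,
      "aten::my_op_(Tensor(a!) self, Tensor other) -> Tensor(a!)",
      &CPUType::my_op_);
#endif
```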
5 changes: 1 addition & 4 deletions tools/autograd/gen_variable_type.py
@@ -154,10 +154,7 @@
""")

UNBOXEDONLY_WRAPPER_REGISTRATION = CodeTemplate("""\
-.op(torch::RegisterOperators::options()
-.schema("${schema_string}")
-.impl_unboxedOnlyKernel<${return_type} (${formal_types}), &VariableType::${api_name}>(TensorTypeId::VariableTensorId)
-.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA))
+.registerOp<${return_type} (${formal_types})>(TensorTypeId::VariableTensorId, "${schema_string}", &VariableType::${api_name})
""")

WRAPPER_REGISTRATION = CodeTemplate("""\
8 changes: 1 addition & 7 deletions tools/autograd/templates/VariableType.cpp
@@ -1,7 +1,6 @@
#include "torch/csrc/autograd/VariableTypeUtils.h"

#include <ATen/TypeDefault.h>
-#include <ATen/core/op_registration/op_registration.h>

// ${generated_comment}

@@ -32,11 +31,6 @@ namespace torch { namespace autograd {

${type_derived_method_definitions}

-namespace {

-static auto registerer = torch::RegisterOperators()
+static auto& registerer = globalATenDispatch()
${wrapper_registrations};

-}

}} // namespace torch::autograd
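
The autograd wrappers follow the same pattern: with the anonymous namespace dropped, the generated VariableType.cpp chains its wrapper registrations onto globalATenDispatch() against the Variable type id. A brief sketch, again with an invented operator:

```
// Sketch of a wrapper registration in the generated VariableType.cpp.
// "aten::my_op" is invented for illustration.
static auto& registerer = globalATenDispatch()
  .registerOp<Tensor (const Tensor &)>(
      TensorTypeId::VariableTensorId,
      "aten::my_op(Tensor self) -> Tensor",
      &VariableType::my_op);
```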