
Commit d9551a2

rebase and address comments on "[jit] register __getitem__ builtin"
[jit] register __getitem__ builtin

Summary: Follow-up to #21990. This switches index-select operations to a single standard __getitem__ builtin, rather than a set of type-specific builtins such as prim::DictIndex and prim::ListIndex. This also aligns indexing with the other magic methods we already use.

gh-metadata: pytorch pytorch 22276 gh/wanchaol/28/head
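For context, a minimal TorchScript sketch (not part of the commit; the function and inputs are hypothetical) of the kind of code this change affects — after it, both index expressions below compile to the same __getitem__ builtin instead of separate type-specific ops:

    import torch
    from typing import Dict, List

    # Hypothetical example: list and dict indexing now lower to one
    # __getitem__ builtin in the TorchScript IR, rather than to
    # type-specific builtins such as prim::ListIndex / prim::DictIndex.
    @torch.jit.script
    def index_both(xs: List[int], d: Dict[str, int]) -> int:
        return xs[0] + d["a"]

    print(index_both([1, 2, 3], {"a": 10}))   # prints 11
    print(index_both.graph)                   # inspect the emitted IR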
2 parents 8e4cd34 + 6561378

File tree

330 files changed: +7368 additions, -4451 deletions


.circleci/config.yml

Lines changed: 2 additions & 2 deletions
@@ -92,7 +92,7 @@ pytorch_linux_build_defaults: &pytorch_linux_build_defaults
       docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace

       if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
-        NAMED_FLAG="export USE_NAMEDTENSOR=1"
+        NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
       fi

       export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
@@ -136,7 +136,7 @@ pytorch_linux_test_defaults: &pytorch_linux_test_defaults
       output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
       if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
        export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
-        NAMED_FLAG="export USE_NAMEDTENSOR=1"
+        NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
       else
        export COMMIT_DOCKER_IMAGE=$output_image
       fi

.circleci/verbatim-sources/linux-build-defaults.yml

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@ pytorch_linux_build_defaults: &pytorch_linux_build_defaults
       docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace

       if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
-        NAMED_FLAG="export USE_NAMEDTENSOR=1"
+        NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
       fi

       export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
@@ -78,7 +78,7 @@ pytorch_linux_test_defaults: &pytorch_linux_test_defaults
       output_image=${DOCKER_IMAGE}-${CIRCLE_SHA1}
       if [[ ${BUILD_ENVIRONMENT} == *"namedtensor"* ]]; then
        export COMMIT_DOCKER_IMAGE=$output_image-namedtensor
-        NAMED_FLAG="export USE_NAMEDTENSOR=1"
+        NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
       else
        export COMMIT_DOCKER_IMAGE=$output_image
       fi

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -41,6 +41,8 @@ third_party/build/
 tools/shared/_utils_internal.py
 torch.egg-info/
 torch/__init__.pyi
+torch/nn/functional.pyi
+torch/nn/modules/*.pyi
 torch/csrc/autograd/generated/*
 torch/csrc/cudnn/cuDNN.cpp
 torch/csrc/generated

.jenkins/pytorch/win-test-helpers/build_pytorch.bat

Lines changed: 3 additions & 0 deletions
@@ -69,6 +69,9 @@ set CXX=sccache cl

 set CMAKE_GENERATOR=Ninja

+:: The following code will try to build PyTorch twice if USE_CUDA is neither 0
+:: nor 1. It is intended so that both builds can be folded into 1 CI run.
+
 if not "%USE_CUDA%"=="1" (
   if "%REBUILD%"=="" (
     set NO_CUDA=1

CMakeLists.txt

Lines changed: 6 additions & 3 deletions
@@ -81,6 +81,7 @@ option(BUILD_PYTHON "Build Python binaries" ON)
 option(BUILD_CAFFE2_OPS "Build Caffe2 operators" ON)
 option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
 option(BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" ON)
+option(BUILD_NAMEDTENSOR "Experimental: compile with namedtensor support" OFF)
 cmake_dependent_option(
     CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
     "BUILD_SHARED_LIBS AND BUILD_CUSTOM_PROTOBUF" OFF)
@@ -100,7 +101,6 @@ cmake_dependent_option(
     USE_CUDNN "Use cuDNN" ON
     "USE_CUDA" OFF)
 option(USE_FBGEMM "Use FBGEMM (quantized 8-bit server operators)" OFF)
-option(NAMEDTENSOR_ENABLED "Experimental: compile with namedtensor support" OFF)
 option(USE_FFMPEG "Use ffmpeg" OFF)
 option(USE_GFLAGS "Use GFLAGS" OFF)
 option(USE_GLOG "Use GLOG" OFF)
@@ -136,6 +136,9 @@ cmake_dependent_option(
     USE_MKLDNN "Use MKLDNN. Only available on x86 and x86_64." ON
     "CPU_INTEL" OFF)
 set(MKLDNN_ENABLE_CONCURRENT_EXEC ${USE_MKLDNN})
+cmake_dependent_option(
+    USE_MKLDNN_CBLAS "Use CBLAS in MKLDNN" OFF
+    "USE_MKLDNN" OFF)
 option(USE_DISTRIBUTED "Use distributed" ON)
 cmake_dependent_option(
     USE_MPI "Use MPI for Caffe2. Only available if USE_DISTRIBUTED is on." ON
@@ -282,8 +285,8 @@ if(USE_FBGEMM)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_FBGEMM")
 endif()

-if(NAMEDTENSOR_ENABLED)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNAMEDTENSOR_ENABLED")
+if(BUILD_NAMEDTENSOR)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_NAMEDTENSOR")
 endif()

 # ---[ Whitelist file if whitelist is specified
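As a hedged aside on the renamed option (not part of the diff): a build configured with BUILD_NAMEDTENSOR=ON defines the BUILD_NAMEDTENSOR preprocessor symbol, compiling in the experimental named-tensor support. A rough Python probe for whether a given build carries it — the "names" attribute is an assumption about the experimental API surface, not something this commit defines:

    import torch

    # Hypothetical probe: checks whether the experimental named-tensor
    # surface is present in this build of torch. The "names" attribute is
    # an assumption, not defined by this commit.
    print("named tensor support:", hasattr(torch.Tensor, "names"))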

aten/src/ATen/ATen.h

Lines changed: 2 additions & 1 deletion
@@ -9,7 +9,7 @@
 #include <ATen/Dispatch.h>
 #include <ATen/Formatting.h>
 #include <ATen/Functions.h>
-#ifdef NAMEDTENSOR_ENABLED
+#ifdef BUILD_NAMEDTENSOR
 #include <ATen/NamedTensor.h>
 #endif
 #include <ATen/ScalarOps.h>
@@ -23,6 +23,7 @@
 #include <ATen/core/Scalar.h>
 #include <c10/core/Storage.h>
 #include <c10/core/TensorOptions.h>
+#include <ATen/core/Reduction.h>
 #include <c10/util/Exception.h>
 #include <ATen/core/ATenDispatch.h>
 #include <ATen/core/UnsafeFromTH.h>

aten/src/ATen/CPUTypeDefault.cpp

Lines changed: 0 additions & 7 deletions
This file was deleted.

aten/src/ATen/CPUTypeDefault.h

Lines changed: 0 additions & 11 deletions
This file was deleted.

aten/src/ATen/Context.cpp

Lines changed: 2 additions & 54 deletions
@@ -10,33 +10,16 @@
 #include <string>
 #include <stdexcept>

-#include <ATen/RegisterCPU.h>
 #include <ATen/Tensor.h>
 #include <ATen/cpu/FlushDenormal.h>

 #include <TH/TH.h> // for USE_LAPACK

 namespace at {

-static inline void errorHandler(const char * msg, void * data) {
-  throw std::runtime_error(msg);
-}
-static inline void argErrorHandler(int arg, const char * msg, void * data) {
-  std::stringstream new_error;
-  new_error << "invalid argument " << arg << ": " << msg;
-  throw std::runtime_error(new_error.str());
-}
-
 Context::Context()
-  : next_id(static_cast<size_t>(TypeID::NumOptions))
-  , thc_state(nullptr, [](THCState* p){ /* no-op */ } )
-  , thh_state(nullptr, [](THHState* p){ /* no-op */ } )
-  {
-
-  THSetDefaultErrorHandler(errorHandler,nullptr);
-  THSetDefaultArgErrorHandler(argErrorHandler,nullptr);
-  register_cpu_types(this);
-}
+  : thc_state(nullptr, [](THCState* p){ /* no-op */ } )
+  , thh_state(nullptr, [](THHState* p){ /* no-op */ } ) {}

 // TODO: This could be bad juju if someone calls globalContext() in the
 // destructor of an object with static lifetime.
@@ -108,38 +91,6 @@ bool Context::setFlushDenormal(bool on) {
   return at::cpu::set_flush_denormal(on);
 }

-// NOTE: We also check `at::NonVariableTypeMode`, and if it's enabled we always
-// return non-Variable type in this function.
-// See NOTE [ Treating Variables as non-Variables in type dispatch ]
-TypeExtendedInterface& getType(TensorOptions options) {
-  return globalContext().getType(
-      options.backend(), typeMetaToScalarType(options.dtype()), options.is_variable() && !at::NonVariableTypeMode::is_enabled());
-}
-
-// NOTE: We also check `at::NonVariableTypeMode`, and if it's enabled we always
-// return non-Variable type in this function.
-// See NOTE [ Treating Variables as non-Variables in type dispatch ]
-TypeExtendedInterface& getType(const TensorImpl* impl) {
-  Backend backend = tensorTypeIdToBackend(impl->type_id());
-  return globalContext().getType(
-      backend, typeMetaToScalarType(impl->dtype()), impl->is_variable());
-}
-
-TypeExtendedInterface& getType(const Tensor& t) {
-  return getType(t.unsafeGetTensorImpl());
-}
-
-LegacyTHDispatcher& getLegacyTHDispatcher(TensorOptions options) {
-  return globalContext().getLegacyTHDispatcher(
-      options.backend(), typeMetaToScalarType(options.dtype()));
-}
-
-LegacyTHDispatcher& getLegacyTHDispatcher(const TensorImpl* impl) {
-  Backend backend = tensorTypeIdToBackend(impl->type_id());
-  return globalContext().getLegacyTHDispatcher(
-      backend, typeMetaToScalarType(impl->dtype()));
-}
-
 Allocator* getCPUAllocator() {
   return getTHDefaultAllocator();
 }
@@ -155,9 +106,6 @@ struct LegacyDeviceTypeInit : public LegacyDeviceTypeInitInterface {
   void initHIP() const override {
     globalContext().lazyInitHIP();
   }
-  void initComplex() const override {
-    globalContext().lazyInitComplex();
-  }
 };
 REGISTER_LEGACY_TYPE_INIT(LegacyDeviceTypeInit);

aten/src/ATen/Context.h

Lines changed: 0 additions & 63 deletions
@@ -2,18 +2,13 @@

 #include <ATen/core/ATenGeneral.h>
 #include <ATen/Tensor.h>
-#include <ATen/TypeExtendedInterface.h>
 #include <ATen/Utils.h>
-#include <ATen/LegacyTHDispatch.h>
-#include <ATen/LegacyTHDispatcher.h>
 #include <ATen/core/ATenGeneral.h>
 #include <ATen/core/Generator.h>
 #include <ATen/CPUGenerator.h>
 #include <ATen/core/LegacyTypeDispatch.h>
-#include <ATen/core/VariableHooksInterface.h>
 #include <ATen/detail/CUDAHooksInterface.h>
 #include <ATen/detail/HIPHooksInterface.h>
-#include <ATen/detail/ComplexHooksInterface.h>
 #include <c10/util/Exception.h>
 #include <c10/core/impl/DeviceGuardImplInterface.h>
@@ -28,35 +23,6 @@ class Tensor;
 class CAFFE2_API Context {
  public:
   Context();
-  TypeExtendedInterface* getNonVariableTypeRaw(Backend p, ScalarType s) {
-    return static_cast<TypeExtendedInterface*>(globalLegacyTypeDispatch().getNonVariableTypeRaw(p, s));
-  }
-  TypeExtendedInterface * getNonVariableTypeOpt(Backend p, ScalarType s) {
-    return static_cast<TypeExtendedInterface*>(globalLegacyTypeDispatch().getNonVariableTypeOpt(p, s));
-  }
-  TypeExtendedInterface & getNonVariableType(Backend p, ScalarType s) {
-    return static_cast<TypeExtendedInterface&>(globalLegacyTypeDispatch().getNonVariableType(p, s));
-  }
-  TypeExtendedInterface & getVariableType(Backend p, ScalarType s) {
-    return static_cast<TypeExtendedInterface&>(globalLegacyTypeDispatch().getVariableType(p, s));
-  }
-  TypeExtendedInterface & getType(Backend p, ScalarType s, bool is_variable) {
-    return static_cast<TypeExtendedInterface&>(globalLegacyTypeDispatch().getType(p, s, is_variable));
-  }
-  LegacyTHDispatcher& getLegacyTHDispatcher(Backend p, ScalarType s) {
-    return globalLegacyTHDispatch().getLegacyTHDispatcher(p, s);
-  }
-  // The passed in Type must be delete'able
-  // TODO: Just make it take a unique_ptr
-  void registerType(Backend b, Type* t) {
-    globalLegacyTypeDispatch().registerType(b,
-      LegacyTypeDispatch::TypeUniquePtr{t, LegacyTypeDeleter([](Type* p) { delete p; }) });
-  }
-
-  void registerLegacyTHDispatcher(Backend b, ScalarType s, LegacyTHDispatcher* t) {
-    globalLegacyTHDispatch().registerDispatcher(b, s,
-      LegacyTHDispatch::LegacyTHDispatcherUniquePtr{t, LegacyTHDispatcherDeleter([](LegacyTHDispatcher* p) { delete p; }) });
-  }

   Generator & defaultGenerator(Device device) {
     DeviceType device_type = device.type();
@@ -102,22 +68,15 @@ class CAFFE2_API Context {
   THCState* lazyInitCUDA() {
     std::call_once(thc_init,[&] {
       thc_state = detail::getCUDAHooks().initCUDA();
-      detail::getCUDAHooks().registerCUDATypes(this);
     });
     return thc_state.get();
   }
   THHState* lazyInitHIP() {
     std::call_once(thh_init,[&] {
       thh_state = detail::getHIPHooks().initHIP();
-      detail::getHIPHooks().registerHIPTypes(this);
     });
     return thh_state.get();
   }
-  void lazyInitComplex() {
-    std::call_once(complex_init_, [&] {
-      detail::getComplexHooks().registerComplexTypes(this);
-    });
-  }

   THCState* getTHCState() {
     // AT_ASSERT(thc_state);
@@ -127,9 +86,6 @@ class CAFFE2_API Context {
     return thh_state.get();
   }

-  size_t freshTypeID() {
-    return next_id++;
-  }
   bool setFlushDenormal(bool on);

   // NB: This method is *purely* whether or not a user requested
@@ -153,21 +109,13 @@ class CAFFE2_API Context {
       lazyInitHIP();
     }
   }
-  void initComplexIfNeeded(ScalarType s) {
-    if (isComplexType(s)) {
-      lazyInitComplex();
-    }
-  }
   std::once_flag thc_init;
   std::once_flag thh_init;
-  std::once_flag complex_init_;
   bool enabled_cudnn = true;
   bool deterministic_cudnn = false;
   bool benchmark_cudnn = false;
-  std::atomic<size_t> next_id;
   std::unique_ptr<THCState, void(*)(THCState*)> thc_state;
   std::unique_ptr<THHState, void(*)(THHState*)> thh_state;
-  friend struct Type;
 };

 CAFFE2_API Context& globalContext();
@@ -176,14 +124,6 @@ static inline void init() {
   globalContext();
 }

-static inline TypeExtendedInterface& getNonVariableType(Backend p, ScalarType s) {
-  return globalContext().getNonVariableType(p, s);
-}
-
-CAFFE2_API TypeExtendedInterface& getType(TensorOptions options);
-CAFFE2_API TypeExtendedInterface& getType(const TensorImpl*);
-CAFFE2_API TypeExtendedInterface& getType(const Tensor&);
-
 CAFFE2_API Allocator* getCPUAllocator();

 static inline DeprecatedTypeProperties& getNonVariableDeprecatedTypeProperties(Backend p, ScalarType s) {
@@ -206,9 +146,6 @@ static inline DeprecatedTypeProperties& HIP(ScalarType s) {
       Backend::HIP, s, /*is_variable*/false);
 }

-CAFFE2_API LegacyTHDispatcher& getLegacyTHDispatcher(TensorOptions options);
-CAFFE2_API LegacyTHDispatcher& getLegacyTHDispatcher(const Tensor&);
-
 static inline bool hasCUDA() {
   return globalContext().hasCUDA();
 }
