Commit f16cf66

Update base for Update on "[jit] register __getitem__ builtin"
[jit] register __getitem__ builtin

Summary: Follow-up to #21990. I am switching index-select operations to a standard __getitem__ builtin, rather than a bunch of different builtins depending on the type, such as prim::DictIndex, prim::ListIndex, etc. This also aligns with some other magic methods that we already use.

gh-metadata: pytorch pytorch 22276 gh/wanchaol/28/head
2 parents 6561378 + f81395b commit f16cf66
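
For context, here is a minimal TorchScript sketch of the kind of indexing code the summary above is about. The claim that list and dict indexing route through a single __getitem__ builtin comes from the commit message; the sketch itself is illustrative and not part of this commit, and printing the graph is just one way to inspect which op the indexing lowered to on your PyTorch version.

```python
import torch
from typing import Dict, List

@torch.jit.script
def index_both(xs: List[int], d: Dict[str, int], key: str) -> int:
    # Per the commit summary, both indexing expressions are meant to go
    # through the standard __getitem__ builtin instead of type-specific ops
    # such as prim::ListIndex / prim::DictIndex.
    return xs[0] + d[key]

print(index_both([1, 2, 3], {"a": 10}, "a"))   # 11
print(index_both.graph)                        # inspect the lowered indexing ops
```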

File tree: 334 files changed, +6990 -3702 lines


.circleci/scripts/binary_populate_env.sh

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
 export TORCH_PACKAGE_NAME='torch-nightly'
 export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'

-export NO_FBGEMM=1
+export USE_FBGEMM=0
 export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
 export DOCKER_IMAGE="$DOCKER_IMAGE"

.jenkins/pytorch/build-asan.sh

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@ export ASAN_OPTIONS=detect_leaks=0:symbolize=1
 CC="clang" CXX="clang++" LDSHARED="clang --shared" \
 CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -shared-libasan -pthread" \
 CXX_FLAGS="-pthread" \
-  USE_ASAN=1 NO_CUDA=1 USE_MKLDNN=0 \
+  USE_ASAN=1 USE_CUDA=0 USE_MKLDNN=0 \
   python setup.py install

 assert_git_not_dirty

.jenkins/pytorch/macos-build.sh

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *cuda9.2* ]]; then
   export PATH=/Developer/NVIDIA/CUDA-${CUDA_VERSION}/bin${PATH:+:${PATH}}
   export DYLD_LIBRARY_PATH=/Developer/NVIDIA/CUDA-${CUDA_VERSION}/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}
   export CUDA_HOME=/Developer/NVIDIA/CUDA-${CUDA_VERSION}
-  export NO_CUDA=0
+  export USE_CUDA=1

 if [ -z "${IN_CIRCLECI}" ]; then
   # Eigen gives "explicit specialization of class must precede its first use" error

.jenkins/pytorch/test.sh

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ test_aten() {
 }

 test_torchvision() {
-  pip_install git+https://github.com/pytorch/vision.git@2f64dd90e14fe5463b4e5bd152d56e4a6f0419de
+  pip_install git+https://github.com/pytorch/vision.git@487c9bf4b7750e779fac31c35d930381baa60a4a
 }

 test_libtorch() {

.jenkins/pytorch/win-test-helpers/build_pytorch.bat

Lines changed: 6 additions & 2 deletions
@@ -74,8 +74,12 @@ set CMAKE_GENERATOR=Ninja

 if not "%USE_CUDA%"=="1" (
   if "%REBUILD%"=="" (
-    set NO_CUDA=1
+    :: Must save and restore the original value of USE_CUDA, otherwise the
+    :: `if not "%USE_CUDA%"=="0"` line can be messed up.
+    set OLD_USE_CUDA=%USE_CUDA%
+    set USE_CUDA=0
     python setup.py install
+    set USE_CUDA=%OLD_USE_CUDA%
   )
   if errorlevel 1 exit /b 1
   if not errorlevel 0 exit /b 1
@@ -99,7 +103,7 @@ if not "%USE_CUDA%"=="0" (

   set CUDA_NVCC_EXECUTABLE=%TMP_DIR_WIN%\bin\nvcc

-  if "%REBUILD%"=="" set NO_CUDA=0
+  if "%REBUILD%"=="" set USE_CUDA=1

   python setup.py install --cmake && sccache --show-stats && (
     if "%BUILD_ENVIRONMENT%"=="" (

CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -289,6 +289,10 @@ if(BUILD_NAMEDTENSOR)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_NAMEDTENSOR")
 endif()

+if(USE_QNNPACK)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_QNNPACK")
+endif()
+
 # ---[ Whitelist file if whitelist is specified
 include(cmake/Whitelist.cmake)

CONTRIBUTING.md

Lines changed: 3 additions & 3 deletions
@@ -243,15 +243,15 @@ only interested in a specific component.
   Caffe2 operators.

 On the initial build, you can also speed things up with the environment
-variables `DEBUG` and `NO_CUDA`.
+variables `DEBUG` and `USE_CUDA`.

 - `DEBUG=1` will enable debug builds (-g -O0)
 - `REL_WITH_DEB_INFO=1` will enable debug symbols with optimizations (-g -O3)
-- `NO_CUDA=1` will disable compiling CUDA (in case you are developing on something not CUDA related), to save compile time.
+- `USE_CUDA=0` will disable compiling CUDA (in case you are developing on something not CUDA related), to save compile time.

 For example:
 ```bash
-NO_CUDA=1 DEBUG=1 python setup.py develop
+USE_CUDA=0 DEBUG=1 python setup.py develop
 ```

 Make sure you continue to pass these flags on subsequent builds.
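
Since this commit flips the polarity of these build switches (NO_CUDA=1 becomes USE_CUDA=0), the sketch below shows one way a build script could interpret such a flag. The helper name and the set of accepted values are assumptions for illustration only; they are not taken from PyTorch's setup.py.

```python
import os

def env_flag(name: str, default: str = "1") -> bool:
    # Hypothetical helper: a USE_*-style flag is on unless it is explicitly
    # set to a falsy value such as "0", "no", "off", or "false".
    return os.environ.get(name, default).strip().lower() not in ("0", "no", "off", "false")

USE_CUDA = env_flag("USE_CUDA")          # USE_CUDA=0 disables the CUDA build
DEBUG = env_flag("DEBUG", default="0")   # DEBUG=1 enables debug builds (-g -O0)

print(f"USE_CUDA={USE_CUDA}, DEBUG={DEBUG}")
```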

README.md

Lines changed: 1 addition & 1 deletion
@@ -159,7 +159,7 @@ If you want to compile with CUDA support, install
 - [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 9 or above
 - [NVIDIA cuDNN](https://developer.nvidia.com/cudnn) v7 or above

-If you want to disable CUDA support, export environment variable `NO_CUDA=1`.
+If you want to disable CUDA support, export environment variable `USE_CUDA=0`.
 Other potentially useful environment variables may be found in `setup.py`.

 If you are building for NVIDIA's Jetson platforms (Jetson Nano, TX1, TX2, AGX Xavier), instructions [are available here](https://devtalk.nvidia.com/default/topic/1049071/jetson-nano/pytorch-for-jetson-nano/)

aten/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -24,6 +24,7 @@ set(ATen_THIRD_PARTY_INCLUDE)
 set(ATen_CUDA_SRCS)
 set(ATen_CUDA_TEST_SRCS)
 set(ATen_CUDA_INCLUDE)
+set(ATen_NVRTC_STUB_SRCS)
 set(ATen_HIP_SRCS)
 set(ATen_HIP_TEST_SRCS)
 set(ATen_HIP_INCLUDE)
@@ -101,6 +102,7 @@ add_subdirectory(src/ATen)
 set(ATen_CPU_SRCS ${ATen_CPU_SRCS} PARENT_SCOPE)
 set(ATen_CUDA_SRCS ${ATen_CUDA_SRCS} PARENT_SCOPE)
 set(ATen_HIP_SRCS ${ATen_HIP_SRCS} PARENT_SCOPE)
+set(ATen_NVRTC_STUB_SRCS ${ATen_NVRTC_STUB_SRCS} PARENT_SCOPE)
 set(ATen_CPU_TEST_SRCS ${ATen_CPU_TEST_SRCS} PARENT_SCOPE)
 set(ATen_CUDA_TEST_SRCS ${ATen_CUDA_TEST_SRCS} PARENT_SCOPE)
 set(ATen_HIP_TEST_SRCS ${ATen_HIP_TEST_SRCS} PARENT_SCOPE)

aten/re_worker_requirements

Lines changed: 4 additions & 0 deletions
@@ -6,5 +6,9 @@
   "ATen-cu#compile-pic-Unique.cu.o199e1a23,platform007-clang": {
     "workerSize": "MEDIUM",
     "platformType": "LINUX"
+  },
+  "ATen-cu#platform007-clang,shared": {
+    "workerSize": "MEDIUM",
+    "platformType": "LINUX"
   }
 }
