Skip to content

Commit 91778a9

Browse files
committed
Update
[ghstack-poisoned]
2 parents 452fedf + f253d07 commit 91778a9

File tree

260 files changed

+6734
-2150
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

260 files changed

+6734
-2150
lines changed

.ci/manywheel/build_common.sh

Lines changed: 21 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,12 +18,14 @@ retry () {
1818
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
1919
}
2020

21+
PLATFORM="manylinux2014_x86_64"
2122
# TODO move this into the Docker images
2223
OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
2324
if [[ "$OS_NAME" == *"CentOS Linux"* ]]; then
2425
retry yum install -q -y zip openssl
2526
elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then
2627
retry yum install -q -y zip openssl
28+
PLATFORM="manylinux_2_28_x86_64"
2729
elif [[ "$OS_NAME" == *"Red Hat Enterprise Linux"* ]]; then
2830
retry dnf install -q -y zip openssl
2931
elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then
@@ -377,6 +379,12 @@ for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.w
377379
$PATCHELF_BIN --print-rpath $sofile
378380
done
379381

382+
# create Manylinux 2_28 tag; this needs to happen before regenerating the RECORD
383+
if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then
384+
wheel_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/WHEEL/g')
385+
sed -i -e s#linux_x86_64#"${PLATFORM}"# $wheel_file;
386+
fi
387+
380388
# regenerate the RECORD file with new hashes
381389
record_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/RECORD/g')
382390
if [[ -e $record_file ]]; then
@@ -416,12 +424,20 @@ for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.w
416424
popd
417425
fi
418426

419-
# zip up the wheel back
420-
zip -rq $(basename $pkg) $PREIX*
427+
# Rename wheel for Manylinux 2_28
428+
if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then
429+
pkg_name=$(echo $(basename $pkg) | sed -e s#linux_x86_64#"${PLATFORM}"#)
430+
zip -rq $pkg_name $PREIX*
431+
rm -f $pkg
432+
mv $pkg_name $(dirname $pkg)/$pkg_name
433+
else
434+
# zip up the wheel back
435+
zip -rq $(basename $pkg) $PREIX*
436+
# remove original wheel
437+
rm -f $pkg
438+
mv $(basename $pkg) $pkg
439+
fi
421440
422-
# replace original wheel
423-
rm -f $pkg
424-
mv $(basename $pkg) $pkg
425441
cd ..
426442
rm -rf tmp
427443
done
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
2+
project(simple-torch-test)
3+
4+
find_package(Torch REQUIRED)
5+
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
6+
7+
8+
add_executable(simple-torch-test simple-torch-test.cpp)
9+
target_include_directories(simple-torch-test PRIVATE ${TORCH_INCLUDE_DIRS})
10+
target_link_libraries(simple-torch-test "${TORCH_LIBRARIES}")
11+
set_property(TARGET simple-torch-test PROPERTY CXX_STANDARD 17)
12+
13+
find_package(CUDAToolkit 11.8)
14+
15+
target_link_libraries(simple-torch-test CUDA::cudart CUDA::cufft CUDA::cusparse CUDA::cublas CUDA::cusolver)
16+
find_library(CUDNN_LIBRARY NAMES cudnn)
17+
target_link_libraries(simple-torch-test ${CUDNN_LIBRARY} )
18+
if(MSVC)
19+
file(GLOB TORCH_DLLS "$ENV{CUDA_PATH}/bin/cudnn64_8.dll" "$ENV{NVTOOLSEXT_PATH}/bin/x64/*.dll")
20+
message("dlls to copy " ${TORCH_DLLS})
21+
add_custom_command(TARGET simple-torch-test
22+
POST_BUILD
23+
COMMAND ${CMAKE_COMMAND} -E copy_if_different
24+
${TORCH_DLLS}
25+
$<TARGET_FILE_DIR:simple-torch-test>)
26+
endif(MSVC)
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
#include <torch/torch.h>
2+
3+
int main(int argc, const char* argv[]) {
4+
std::cout << "Checking that CUDA archs are setup correctly" << std::endl;
5+
TORCH_CHECK(torch::rand({ 3, 5 }, torch::Device(torch::kCUDA)).defined(), "CUDA archs are not setup correctly");
6+
7+
// These have to run after CUDA is initialized
8+
9+
std::cout << "Checking that magma is available" << std::endl;
10+
TORCH_CHECK(torch::hasMAGMA(), "MAGMA is not available");
11+
12+
std::cout << "Checking that CuDNN is available" << std::endl;
13+
TORCH_CHECK(torch::cuda::cudnn_is_available(), "CuDNN is not available");
14+
return 0;
15+
}
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
#include <torch/torch.h>
2+
3+
int main(int argc, const char* argv[]) {
4+
TORCH_CHECK(torch::hasMKL(), "MKL is not available");
5+
return 0;
6+
}
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
#include <ATen/ATen.h>
2+
#include <torch/torch.h>
3+
4+
int main(int argc, const char* argv[]) {
5+
TORCH_CHECK(at::globalContext().isXNNPACKAvailable(), "XNNPACK is not available");
6+
return 0;
7+
}
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
r"""
2+
It's used to check basic cnn features with cuda.
3+
For example, it would throw an exception if some components are missing
4+
"""
5+
6+
import torch
7+
import torch.nn as nn
8+
import torch.nn.functional as F
9+
import torch.optim as optim
10+
11+
12+
class SimpleCNN(nn.Module):
13+
def __init__(self):
14+
super().__init__()
15+
self.conv = nn.Conv2d(1, 1, 3)
16+
self.pool = nn.MaxPool2d(2, 2)
17+
18+
def forward(self, inputs):
19+
output = self.pool(F.relu(self.conv(inputs)))
20+
output = output.view(1)
21+
return output
22+
23+
24+
# Mock one infer
25+
device = torch.device("cuda:0")
26+
net = SimpleCNN().to(device)
27+
net_inputs = torch.rand((1, 1, 5, 5), device=device)
28+
outputs = net(net_inputs)
29+
print(outputs)
30+
31+
criterion = nn.MSELoss()
32+
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.1)
33+
34+
# Mock one step training
35+
label = torch.full((1,), 1.0, dtype=torch.float, device=device)
36+
loss = criterion(outputs, label)
37+
loss.backward()
38+
optimizer.step()
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
r"""
2+
It's used to check basic rnn features with cuda.
3+
For example, it would throw an exception if some components are missing
4+
"""
5+
6+
import torch
7+
import torch.nn as nn
8+
9+
10+
device = torch.device("cuda:0")
11+
rnn = nn.RNN(10, 20, 2).to(device)
12+
inputs = torch.randn(5, 3, 10).to(device)
13+
h0 = torch.randn(2, 3, 20).to(device)
14+
output, hn = rnn(inputs, h0)
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
#include <torch/torch.h>
2+
3+
int main(int argc, const char* argv[]) {
4+
TORCH_WARN("Simple test passed!");
5+
return 0;
6+
}
Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,133 @@
1+
@echo off
2+
3+
:: This script parses args, installs required libraries (miniconda, MKL,
4+
:: Magma), and then delegates to cpu.bat, cuda80.bat, etc.
5+
6+
if not "%CUDA_VERSION%" == "" if not "%PYTORCH_BUILD_VERSION%" == "" if not "%PYTORCH_BUILD_NUMBER%" == "" goto env_end
7+
if "%~1"=="" goto arg_error
8+
if "%~2"=="" goto arg_error
9+
if "%~3"=="" goto arg_error
10+
if not "%~4"=="" goto arg_error
11+
goto arg_end
12+
13+
:arg_error
14+
15+
echo Illegal number of parameters. Pass cuda version, pytorch version, build number
16+
echo CUDA version should be Mm with no dot, e.g. '80'
17+
echo DESIRED_PYTHON should be M.m, e.g. '2.7'
18+
exit /b 1
19+
20+
:arg_end
21+
22+
set CUDA_VERSION=%~1
23+
set PYTORCH_BUILD_VERSION=%~2
24+
set PYTORCH_BUILD_NUMBER=%~3
25+
26+
:env_end
27+
28+
set CUDA_PREFIX=cuda%CUDA_VERSION%
29+
if "%CUDA_VERSION%" == "cpu" set CUDA_PREFIX=cpu
30+
if "%CUDA_VERSION%" == "xpu" set CUDA_PREFIX=xpu
31+
32+
if "%DESIRED_PYTHON%" == "" set DESIRED_PYTHON=3.5;3.6;3.7
33+
set DESIRED_PYTHON_PREFIX=%DESIRED_PYTHON:.=%
34+
set DESIRED_PYTHON_PREFIX=py%DESIRED_PYTHON_PREFIX:;=;py%
35+
36+
set SRC_DIR=%~dp0
37+
pushd %SRC_DIR%
38+
39+
:: Install Miniconda3
40+
set "CONDA_HOME=%CD%\conda"
41+
set "tmp_conda=%CONDA_HOME%"
42+
set "miniconda_exe=%CD%\miniconda.exe"
43+
rmdir /s /q conda
44+
del miniconda.exe
45+
curl --retry 3 -k https://repo.anaconda.com/miniconda/Miniconda3-py311_23.9.0-0-Windows-x86_64.exe -o "%miniconda_exe%"
46+
start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda%
47+
if ERRORLEVEL 1 exit /b 1
48+
set "ORIG_PATH=%PATH%"
49+
set "PATH=%CONDA_HOME%;%CONDA_HOME%\scripts;%CONDA_HOME%\Library\bin;%PATH%"
50+
51+
:: create a new conda environment and install packages
52+
:try
53+
SET /A tries=3
54+
:loop
55+
IF %tries% LEQ 0 GOTO :exception
56+
call condaenv.bat
57+
IF %ERRORLEVEL% EQU 0 GOTO :done
58+
SET /A "tries=%tries%-1"
59+
:exception
60+
echo "Failed to create conda env"
61+
exit /B 1
62+
:done
63+
64+
:: Download MAGMA Files on CUDA builds
65+
set MAGMA_VERSION=2.5.4
66+
67+
if "%DEBUG%" == "1" (
68+
set BUILD_TYPE=debug
69+
) else (
70+
set BUILD_TYPE=release
71+
)
72+
73+
if not "%CUDA_VERSION%" == "cpu" if not "%CUDA_VERSION%" == "xpu" (
74+
rmdir /s /q magma_%CUDA_PREFIX%_%BUILD_TYPE%
75+
del magma_%CUDA_PREFIX%_%BUILD_TYPE%.7z
76+
curl -k https://s3.amazonaws.com/ossci-windows/magma_%MAGMA_VERSION%_%CUDA_PREFIX%_%BUILD_TYPE%.7z -o magma_%CUDA_PREFIX%_%BUILD_TYPE%.7z
77+
7z x -aoa magma_%CUDA_PREFIX%_%BUILD_TYPE%.7z -omagma_%CUDA_PREFIX%_%BUILD_TYPE%
78+
)
79+
80+
:: Install sccache
81+
if "%USE_SCCACHE%" == "1" (
82+
mkdir %CD%\tmp_bin
83+
curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %CD%\tmp_bin\sccache.exe
84+
curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output %CD%\tmp_bin\sccache-cl.exe
85+
if not "%CUDA_VERSION%" == "" (
86+
set ADDITIONAL_PATH=%CD%\tmp_bin
87+
set SCCACHE_IDLE_TIMEOUT=1500
88+
89+
:: randomtemp is used to resolve the intermittent build error related to CUDA.
90+
:: code: https://github.com/peterjc123/randomtemp-rust
91+
:: issue: https://github.com/pytorch/pytorch/issues/25393
92+
::
93+
:: CMake requires a single command as CUDA_NVCC_EXECUTABLE, so we push the wrappers
94+
:: randomtemp.exe and sccache.exe into a batch file which CMake invokes.
95+
curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output %SRC_DIR%\tmp_bin\randomtemp.exe
96+
echo @"%SRC_DIR%\tmp_bin\randomtemp.exe" "%SRC_DIR%\tmp_bin\sccache.exe" "%CUDA_PATH%\bin\nvcc.exe" %%* > "%SRC_DIR%/tmp_bin/nvcc.bat"
97+
cat %SRC_DIR%/tmp_bin/nvcc.bat
98+
set CUDA_NVCC_EXECUTABLE=%SRC_DIR%/tmp_bin/nvcc.bat
99+
:: CMake doesn't accept back-slashes in the path
100+
for /F "usebackq delims=" %%n in (`cygpath -m "%CUDA_PATH%\bin\nvcc.exe"`) do set CMAKE_CUDA_COMPILER=%%n
101+
set CMAKE_CUDA_COMPILER_LAUNCHER=%SRC_DIR%\tmp_bin\randomtemp.exe;%SRC_DIR%\tmp_bin\sccache.exe
102+
)
103+
)
104+
105+
set PYTORCH_BINARY_BUILD=1
106+
set TH_BINARY_BUILD=1
107+
set INSTALL_TEST=0
108+
109+
for %%v in (%DESIRED_PYTHON_PREFIX%) do (
110+
:: Activate Python Environment
111+
set PYTHON_PREFIX=%%v
112+
set "CONDA_LIB_PATH=%CONDA_HOME%\envs\%%v\Library\bin"
113+
if not "%ADDITIONAL_PATH%" == "" (
114+
set "PATH=%ADDITIONAL_PATH%;%CONDA_HOME%\envs\%%v;%CONDA_HOME%\envs\%%v\scripts;%CONDA_HOME%\envs\%%v\Library\bin;%ORIG_PATH%"
115+
) else (
116+
set "PATH=%CONDA_HOME%\envs\%%v;%CONDA_HOME%\envs\%%v\scripts;%CONDA_HOME%\envs\%%v\Library\bin;%ORIG_PATH%"
117+
)
118+
pip install ninja
119+
@setlocal
120+
:: Set Flags
121+
if not "%CUDA_VERSION%"=="cpu" if not "%CUDA_VERSION%" == "xpu" (
122+
set MAGMA_HOME=%cd%\magma_%CUDA_PREFIX%_%BUILD_TYPE%
123+
)
124+
echo "Calling arch build script"
125+
call %CUDA_PREFIX%.bat
126+
if ERRORLEVEL 1 exit /b 1
127+
@endlocal
128+
)
129+
130+
set "PATH=%ORIG_PATH%"
131+
popd
132+
133+
if ERRORLEVEL 1 exit /b 1

.ci/pytorch/windows/condaenv.bat

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
IF "%DESIRED_PYTHON%"=="" (
2+
echo DESIRED_PYTHON is NOT defined.
3+
exit /b 1
4+
)
5+
6+
:: Create a new conda environment
7+
setlocal EnableDelayedExpansion
8+
FOR %%v IN (%DESIRED_PYTHON%) DO (
9+
set PYTHON_VERSION_STR=%%v
10+
set PYTHON_VERSION_STR=!PYTHON_VERSION_STR:.=!
11+
conda remove -n py!PYTHON_VERSION_STR! --all -y || rmdir %CONDA_HOME%\envs\py!PYTHON_VERSION_STR! /s
12+
if "%%v" == "3.8" call conda create -n py!PYTHON_VERSION_STR! -y -q numpy=1.11 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
13+
if "%%v" == "3.9" call conda create -n py!PYTHON_VERSION_STR! -y -q numpy=2.0.1 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
14+
if "%%v" == "3.10" call conda create -n py!PYTHON_VERSION_STR! -y -q -c=conda-forge numpy=2.0.1 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
15+
if "%%v" == "3.11" call conda create -n py!PYTHON_VERSION_STR! -y -q -c=conda-forge numpy=2.0.1 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
16+
if "%%v" == "3.12" call conda create -n py!PYTHON_VERSION_STR! -y -q -c=conda-forge numpy=2.0.1 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
17+
if "%%v" == "3.13" call conda create -n py!PYTHON_VERSION_STR! -y -q -c=conda-forge numpy=2.1.2 pyyaml boto3 cmake ninja typing_extensions setuptools=72.1.0 python=%%v
18+
call conda run -n py!PYTHON_VERSION_STR! pip install mkl-include
19+
call conda run -n py!PYTHON_VERSION_STR! pip install mkl-static
20+
)
21+
endlocal
22+
23+
:: Install libuv
24+
conda install -y -q -c conda-forge libuv=1.39
25+
set libuv_ROOT=%CONDA_HOME%\Library
26+
echo libuv_ROOT=%libuv_ROOT%

0 commit comments

Comments
 (0)