2 files changed: +5 −5 lines
@@ -21,7 +21,7 @@
       job: run_models_gpu
       slack_report_channel: "#amd-hf-ci"
       runner_group: hfc-amd-mi355
-      docker: huggingface/testing-rocm7.0-preview
+      docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi355
       report_repo_id: hf-transformers-bot/transformers-ci-dummy
     secrets: inherit
@@ -33,7 +33,7 @@
       job: run_pipelines_torch_gpu
       slack_report_channel: "#amd-hf-ci"
       runner_group: hfc-amd-mi355
-      docker: huggingface/testing-rocm7.0-preview
+      docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi355
       report_repo_id: hf-transformers-bot/transformers-ci-dummy
     secrets: inherit
@@ -45,7 +45,7 @@
       job: run_examples_gpu
       slack_report_channel: "#amd-hf-ci"
       runner_group: hfc-amd-mi355
-      docker: huggingface/testing-rocm7.0-preview
+      docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi355
       report_repo_id: hf-transformers-bot/transformers-ci-dummy
     secrets: inherit
docker/transformers-pytorch-amd-gpu
@@ -39,7 +39,7 @@ RUN python3 -m pip install --no-cache-dir "torchcodec==0.5"
 # Install flash attention from source. Tested with commit 6387433156558135a998d5568a9d74c1778666d8
 RUN git clone https://github.com/ROCm/flash-attention/ -b tridao && \
     cd flash-attention && \
-    GPU_ARCHS="gfx942;gfx950" python setup.py install
-# GPU_ARCHS builds for MI300, MI325 and MI355
+    GPU_ARCHS="gfx942" python setup.py install
+# GPU_ARCHS builds for MI300 and MI325 but not MI355: we would need to add `;gfx950`, but that makes the build take too long.
 
 RUN python3 -m pip install --no-cache-dir einops
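For reference, if MI355 coverage is needed later, a minimal sketch of the same build step with gfx950 re-enabled could look like the lines below, at the cost of a much longer build. The MAX_JOBS setting is an assumption (upstream flash-attention honors it to cap parallel compile jobs) and is not part of this change.

# Sketch only, not part of this PR: build flash-attention for gfx942 (MI300/MI325) and gfx950 (MI355).
# Expect a significantly longer compile; MAX_JOBS (assumed to be honored as in upstream flash-attention) limits parallel jobs.
RUN git clone https://github.com/ROCm/flash-attention/ -b tridao && \
    cd flash-attention && \
    GPU_ARCHS="gfx942;gfx950" MAX_JOBS=16 python setup.py install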