name: Nightly and release tests on main/release branch

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *" # every day at midnight

env:
  DIFFUSERS_IS_CI: yes
  HF_HUB_ENABLE_HF_TRANSFER: 1
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  PYTEST_TIMEOUT: 600
  RUN_SLOW: yes
  RUN_NIGHTLY: yes
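  # PIPELINE_USAGE_CUTOFF is read by utils/fetch_torch_cuda_pipeline_test_matrix.py, presumably as the
  # minimum Hub download count a pipeline needs before it is included in the nightly test matrix.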
  PIPELINE_USAGE_CUTOFF: 5000
  SLACK_API_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}

jobs:
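  # Builds the matrix of pipeline test modules on a cheap CPU runner; the JSON it writes to
  # pipeline_test_matrix is consumed by run_nightly_tests_for_torch_pipelines below.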
  setup_torch_cuda_pipeline_matrix:
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          pip install -e .[test]
          pip install huggingface_hub
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT

      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

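  # Runs the slow CUDA pipeline tests, one matrix job per pipeline module emitted by the setup job above.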
  run_nightly_tests_for_torch_pipelines:
    name: Nightly Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Pipeline CUDA Test
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
            --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
            tests/pipelines/${{ matrix.module }}
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports
      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

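  # Same CUDA runner, but for the non-pipeline test suites (models, schedulers, LoRA, single-file
  # loading, misc) plus the example scripts, selected via the `module` matrix entry.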
  run_nightly_tests_for_other_torch_modules:
    name: Nightly Torch CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others, single_file, examples]
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: python utils/print_env.py

      - name: Run nightly PyTorch CUDA tests for non-pipeline modules
        if: ${{ matrix.module != 'examples' }}
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_torch_${{ matrix.module }}_cuda \
            --report-log=tests_torch_${{ matrix.module }}_cuda.log \
            tests/${{ matrix.module }}

      - name: Run nightly example tests with Torch
        if: ${{ matrix.module == 'examples' }}
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v --make-reports=examples_torch_cuda \
            --report-log=examples_torch_cuda.log \
            examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_${{ matrix.module }}_cuda_test_reports
          path: reports

      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

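  # Tests marked with the `big_gpu_with_torch_cuda` pytest marker run here on a larger GPU instance;
  # BIG_GPU_MEMORY is presumably the VRAM budget in GB that those tests may assume.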
  run_big_gpu_torch_tests:
    name: Torch tests on big GPU
    strategy:
      fail-fast: false
      max-parallel: 2
    runs-on:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Selected Torch CUDA Test on big GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -m "big_gpu_with_torch_cuda" \
            --make-reports=tests_big_gpu_torch_cuda \
            --report-log=tests_big_gpu_torch_cuda.log \
            tests/
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_big_gpu_torch_cuda_stats.txt
          cat reports/tests_big_gpu_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_big_gpu_test_reports
          path: reports
      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

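  # Re-runs a core subset of the suite against the oldest supported PyTorch, using the
  # diffusers-pytorch-minimum-cuda image instead of the latest-torch one.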
  torch_minimum_version_cuda_tests:
    name: Torch Minimum Version CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-minimum-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_torch_minimum_version_cuda \
            tests/models/test_modeling_common.py \
            tests/pipelines/test_pipelines_common.py \
            tests/pipelines/test_pipeline_utils.py \
            tests/pipelines/test_pipelines.py \
            tests/pipelines/test_pipelines_auto.py \
            tests/schedulers/test_schedulers.py \
            tests/others

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_minimum_version_cuda_stats.txt
          cat reports/tests_torch_minimum_version_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_minimum_version_cuda_test_reports
          path: reports

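  # Flax tests on a TPU v5 litepod runner. Gated on the schedule event, so manual workflow_dispatch
  # runs skip it; `-n 0` below disables pytest-xdist workers so everything runs in a single process.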
  run_flax_tpu_tests:
    name: Nightly Flax TPU Tests
    runs-on:
      group: gcp-ct5lp-hightpu-8t
    if: github.event_name == 'schedule'

    container:
      image: diffusers/diffusers-flax-tpu
      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV }} -v /mnt/hf_cache:/mnt/hf_cache
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          python -m uv pip install pytest-reportlog

      - name: Environment
        run: python utils/print_env.py

      - name: Run nightly Flax TPU tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
            --make-reports=tests_flax_tpu \
            --report-log=tests_flax_tpu.log \
            tests/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_flax_tpu_stats.txt
          cat reports/tests_flax_tpu_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: flax_tpu_test_reports
          path: reports

      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

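  # ONNX Runtime CUDA execution-provider tests; only tests whose names match "Onnx" are selected.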
  run_nightly_onnx_tests:
    name: Nightly ONNXRuntime CUDA tests on Ubuntu
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-onnxruntime-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: nvidia-smi

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: python utils/print_env.py

      - name: Run Nightly ONNXRuntime CUDA tests
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
            --make-reports=tests_onnx_cuda \
            --report-log=tests_onnx_cuda.log \
            tests/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_onnx_cuda_stats.txt
          cat reports/tests_onnx_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: tests_onnx_cuda_reports
          path: reports

      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

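  # Quantization backends each get their own matrix entry: `backend` is the pip package that gets
  # installed, `test_location` the matching folder under tests/quantization/.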
  run_nightly_quantization_tests:
    name: Torch quantization nightly tests
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        config:
          - backend: "bitsandbytes"
            test_location: "bnb"
          - backend: "gguf"
            test_location: "gguf"
          - backend: "torchao"
            test_location: "torchao"
    runs-on:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "20gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e .[quality,test]
          python -m uv pip install -U ${{ matrix.config.backend }}
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
      - name: ${{ matrix.config.backend }} quantization tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
            --report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
            tests/quantization/${{ matrix.config.test_location }}
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_${{ matrix.config.backend }}_torch_cuda_stats.txt
          cat reports/tests_${{ matrix.config.backend }}_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_${{ matrix.config.backend }}_reports
          path: reports
      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY

# M1 runner currently not well supported
# TODO: (Dhruv) add these back when we setup better testing for Apple Silicon
#  run_nightly_tests_apple_m1:
#    name: Nightly PyTorch MPS tests on MacOS
#    runs-on: [ self-hosted, apple-m1 ]
#    if: github.event_name == 'schedule'
#
#    steps:
#      - name: Checkout diffusers
#        uses: actions/checkout@v3
#        with:
#          fetch-depth: 2
#
#      - name: Clean checkout
#        shell: arch -arch arm64 bash {0}
#        run: |
#          git clean -fxd
#      - name: Setup miniconda
#        uses: ./.github/actions/setup-miniconda
#        with:
#          python-version: 3.9
#
#      - name: Install dependencies
#        shell: arch -arch arm64 bash {0}
#        run: |
#          ${CONDA_RUN} python -m pip install --upgrade pip uv
#          ${CONDA_RUN} python -m uv pip install -e .[quality,test]
#          ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
#          ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
#          ${CONDA_RUN} python -m uv pip install pytest-reportlog
#      - name: Environment
#        shell: arch -arch arm64 bash {0}
#        run: |
#          ${CONDA_RUN} python utils/print_env.py
#      - name: Run nightly PyTorch tests on M1 (MPS)
#        shell: arch -arch arm64 bash {0}
#        env:
#          HF_HOME: /System/Volumes/Data/mnt/cache
#          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
#        run: |
#          ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
#            --report-log=tests_torch_mps.log \
#            tests/
#      - name: Failure short reports
#        if: ${{ failure() }}
#        run: cat reports/tests_torch_mps_failures_short.txt
#
#      - name: Test suite reports artifacts
#        if: ${{ always() }}
#        uses: actions/upload-artifact@v4
#        with:
#          name: torch_mps_test_reports
#          path: reports
#
#      - name: Generate Report and Notify Channel
#        if: always()
#        run: |
#          pip install slack_sdk tabulate
#          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY