
Tests compile fixes (#5148)

* test fix

* fix tests

* fix report name

---------

Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
Dhruv Nair authored on 2023-09-26 11:36:46 +05:30 (committed by GitHub)
parent a91a273d0b
commit bdd2544673
10 changed files with 116 additions and 2 deletions

View File

@@ -26,6 +26,7 @@ jobs:
image-name:
- diffusers-pytorch-cpu
- diffusers-pytorch-cuda
- diffusers-pytorch-compile-cuda
- diffusers-flax-cpu
- diffusers-flax-tpu
- diffusers-onnxruntime-cpu

View File

@@ -74,11 +74,11 @@ jobs:
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
CUBLAS_WORKSPACE_CONFIG: :16:8
run: |
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
-   -s -v -k "not Flax and not Onnx" \
+   -s -v -k "not Flax and not Onnx and not compile" \
--make-reports=tests_${{ matrix.config.report }} \
tests/
@@ -113,6 +113,50 @@ jobs:
name: ${{ matrix.config.report }}_test_reports
path: reports
run_torch_compile_tests:
name: PyTorch Compile CUDA tests
runs-on: docker-gpu
container:
image: diffusers/diffusers-pytorch-compile-cuda
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
steps:
- name: Checkout diffusers
uses: actions/checkout@v3
with:
fetch-depth: 2
- name: NVIDIA-SMI
run: |
nvidia-smi
- name: Install dependencies
run: |
python -m pip install -e .[quality,test,training]
- name: Environment
run: |
python utils/print_env.py
- name: Run example tests on GPU
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
run: |
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
- name: Failure short reports
if: ${{ failure() }}
run: cat reports/tests_torch_compile_cuda_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: torch_compile_test_reports
path: reports
run_examples_tests:
name: Examples PyTorch CUDA tests on Ubuntu
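Note on the new run_torch_compile_tests job: it selects tests purely through pytest's keyword filter, so anything whose test name contains "compile" runs here, while the main slow-test job above now excludes those same tests via "not compile". A minimal sketch of the kind of test that filter picks up, using a toy module rather than a diffusers model (not code from this PR, and it assumes a working PyTorch 2 toolchain):

    import unittest

    import torch


    class ToyCompileTests(unittest.TestCase):
        @unittest.skipUnless(hasattr(torch, "compile"), "needs PyTorch 2.x")
        def test_toy_module_compile(self):
            # the substring "compile" in the test name is what -k "compile" matches
            model = torch.nn.Linear(4, 4)
            compiled = torch.compile(model)
            x = torch.randn(2, 4)
            # eager and compiled forward passes should agree closely
            self.assertTrue(torch.allclose(model(x), compiled(x), atol=1e-4))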

View File

@@ -0,0 +1,47 @@
FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
LABEL maintainer="Hugging Face"
LABEL repository="diffusers"
ENV DEBIAN_FRONTEND=noninteractive
RUN apt update && \
apt install -y bash \
build-essential \
git \
git-lfs \
curl \
ca-certificates \
libsndfile1-dev \
libgl1 \
python3.9 \
python3-pip \
python3.9-venv && \
rm -rf /var/lib/apt/lists
# make sure to use venv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
python3 -m pip install --no-cache-dir \
torch \
torchvision \
torchaudio \
invisible_watermark && \
python3 -m pip install --no-cache-dir \
accelerate \
datasets \
hf-doc-builder \
huggingface-hub \
Jinja2 \
librosa \
numpy \
scipy \
tensorboard \
transformers \
omegaconf \
pytorch-lightning \
xformers
CMD ["/bin/bash"]
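Not part of the PR, but a quick sanity check one might run inside the built image to confirm the preinstalled stack exposes torch.compile and can see the GPU (assuming the container is started with GPU access, as the workflow above does):

    import torch

    # versions come from the heavy deps preinstalled in the Dockerfile above;
    # the setup.py extras installed at CI time may later override them
    print("torch version:", torch.__version__)
    print("CUDA available:", torch.cuda.is_available())
    print("torch.compile present:", hasattr(torch, "compile"))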

View File

@@ -266,6 +266,15 @@ def deprecate_after_peft_backend(test_case):
return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case)
def require_python39_or_higher(test_case):
def python39_available():
sys_info = sys.version_info
major, minor = sys_info.major, sys_info.minor
return major == 3 and minor >= 9
return unittest.skipUnless(python39_available(), "test requires Python 3.9 or higher")(test_case)
def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
if isinstance(arry, str):
# local_path = "/home/patrick_huggingface_co/"
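The compile tests touched in the files below are gated on both this new require_python39_or_higher decorator and the existing require_torch_2. A usage sketch with a hypothetical test case; both decorators are importable from diffusers.utils.testing_utils once this change lands:

    import unittest

    import torch

    from diffusers.utils.testing_utils import require_python39_or_higher, require_torch_2


    class ExampleGatedTest(unittest.TestCase):
        @require_python39_or_higher
        @require_torch_2
        def test_toy_compile(self):
            # skipped automatically on Python < 3.9 or PyTorch < 2.0
            fn = torch.compile(lambda x: x * 2)
            self.assertEqual(float(fn(torch.tensor(3.0))), 6.0)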

View File

@@ -33,6 +33,7 @@ from diffusers.training_utils import EMAModel
from diffusers.utils import logging
from diffusers.utils.testing_utils import (
CaptureLogger,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -355,6 +356,7 @@ class ModelTesterMixin:
max_diff = (image - new_image).abs().max().item()
self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
@require_python39_or_higher
@require_torch_2
def test_from_save_pretrained_dynamo(self):
init_dict, _ = self.prepare_init_args_and_inputs_for_common()

View File

@@ -36,6 +36,7 @@ from diffusers.utils.testing_utils import (
enable_full_determinism,
load_image,
load_numpy,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -894,6 +895,7 @@ class ControlNetPipelineSlowTests(unittest.TestCase):
expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@require_python39_or_higher
@require_torch_2
def test_stable_diffusion_compile(self):
run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None)

View File

@@ -44,6 +44,7 @@ from diffusers.utils.testing_utils import (
load_numpy,
nightly,
numpy_cosine_similarity_distance,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -988,6 +989,7 @@ class StableDiffusionPipelineSlowTests(unittest.TestCase):
max_diff = np.abs(expected_image - image).max()
assert max_diff < 8e-1
@require_python39_or_higher
@require_torch_2
def test_stable_diffusion_compile(self):
seed = 0

View File

@@ -38,6 +38,7 @@ from diffusers.utils.testing_utils import (
load_image,
load_numpy,
nightly,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -505,6 +506,7 @@ class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}"
assert np.abs(out.images[0]).sum() < 1e-5 # should be all zeros
@require_python39_or_higher
@require_torch_2
def test_img2img_compile(self):
seed = 0

View File

@@ -42,6 +42,7 @@ from diffusers.utils.testing_utils import (
load_image,
load_numpy,
nightly,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -529,6 +530,7 @@ class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
@require_python39_or_higher
@require_torch_2
def test_inpaint_compile(self):
seed = 0
@@ -770,6 +772,7 @@ class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase):
# make sure that less than 2.45 GB is allocated
assert mem_bytes < 2.45 * 10**9
@require_python39_or_higher
@require_torch_2
def test_inpaint_compile(self):
pass

View File

@@ -73,6 +73,7 @@ from diffusers.utils.testing_utils import (
require_compel,
require_flax,
require_onnxruntime,
require_python39_or_higher,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
@@ -1636,6 +1637,7 @@ class PipelineSlowTests(unittest.TestCase):
assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
@require_python39_or_higher
@require_torch_2
def test_from_save_pretrained_dynamo(self):
run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None)
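
These dynamo/compile tests are funneled through run_test_in_subprocess, presumably because compilation state is process-global and a failure or cache leak should not spill into the rest of the suite. The sketch below illustrates that isolation pattern with plain multiprocessing; it is an independent example written under that assumption, not the diffusers helper itself:

    import multiprocessing

    import torch


    def _compile_and_run(queue):
        # runs in a child process, so compilation state dies with the process
        try:
            fn = torch.compile(lambda x: x + 1)
            queue.put(float(fn(torch.tensor(1.0))))
        except Exception as err:  # surface failures to the parent
            queue.put(err)


    if __name__ == "__main__":
        ctx = multiprocessing.get_context("spawn")
        queue = ctx.Queue()
        proc = ctx.Process(target=_compile_and_run, args=(queue,))
        proc.start()
        result = queue.get(timeout=600)
        proc.join()
        assert result == 2.0, result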