[Tests] Better prints (#1043)
commit c4ef1efe46 (parent 8d6487f3cb), committed via GitHub

The commit replaces bare torch.allclose assertions in the model tests with a torch_all_close helper (exported from diffusers.utils) so that a failing tensor comparison reports the maximum absolute difference and the full diff tensor instead of an uninformative assertion failure.
@@ -40,7 +40,15 @@ from .outputs import BaseOutput
 
 
 if is_torch_available():
-    from .testing_utils import floats_tensor, load_image, parse_flag_from_env, require_torch_gpu, slow, torch_device
+    from .testing_utils import (
+        floats_tensor,
+        load_image,
+        parse_flag_from_env,
+        require_torch_gpu,
+        slow,
+        torch_all_close,
+        torch_device,
+    )
 
 
 logger = get_logger(__name__)
@@ -34,6 +34,14 @@ if is_torch_available():
     torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
 
 
+def torch_all_close(a, b, *args, **kwargs):
+    if not is_torch_available():
+        raise ValueError("PyTorch needs to be installed to use this function.")
+    if not torch.allclose(a, b, *args, **kwargs):
+        assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
+    return True
+
+
 def get_tests_dir(append_path=None):
     """
     Args:
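For context, here is a minimal sketch of how the new helper behaves, using illustrative tensors rather than values from the test suite:

import torch

from diffusers.utils import torch_all_close

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([1.0, 2.0, 3.5])

# On success the helper returns True, so it works both as a bare
# `assert torch_all_close(...)` and wrapped in `self.assertTrue(...)`.
assert torch_all_close(a, a, atol=1e-4)

# On failure it raises an AssertionError whose message includes the
# max absolute difference and the full diff tensor, e.g.
# "Max diff is absolute 0.5. Diff tensor is tensor([0.0000, 0.0000, 0.5000])"
try:
    torch_all_close(a, b, atol=1e-4)
except AssertionError as err:
    print(err)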
@@ -21,7 +21,7 @@ import unittest
 import torch
 
 from diffusers import UNet2DConditionModel, UNet2DModel
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
 from parameterized import parameterized
 
 from ..test_modeling_common import ModelTesterMixin
@@ -156,7 +156,7 @@ class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase):
         model_normal_load.eval()
         arr_normal_load = model_normal_load(noise, time_step)["sample"]
 
-        assert torch.allclose(arr_accelerate, arr_normal_load, rtol=1e-3)
+        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
 
     @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
     def test_memory_footprint_gets_reduced(self):
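A note on the design visible in these hunks: because torch_all_close returns True on success, it can replace torch.allclose in both assertion styles used across the tests, the bare `assert torch_all_close(...)` above and the `self.assertTrue(torch_all_close(...))` wrappers below, while the detailed failure message comes from the `assert False, ...` inside the helper itself.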
@@ -207,7 +207,7 @@ class UNetLDMModelTests(ModelTesterMixin, unittest.TestCase):
         expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
         # fmt: on
 
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
 
 
 class UNet2DConditionModelTests(ModelTesterMixin, unittest.TestCase):
@@ -287,7 +287,7 @@ class UNet2DConditionModelTests(ModelTesterMixin, unittest.TestCase):
         named_params = dict(model.named_parameters())
         named_params_2 = dict(model_2.named_parameters())
         for name, param in named_params.items():
-            self.assertTrue(torch.allclose(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
+            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
 
 
 class NCSNppModelTests(ModelTesterMixin, unittest.TestCase):
@@ -377,7 +377,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase):
         expected_output_slice = torch.tensor([-4836.2231, -6487.1387, -3816.7969, -7964.9253, -10966.2842, -20043.6016, 8137.0571, 2340.3499, 544.6114])
         # fmt: on
 
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
 
     def test_output_pretrained_ve_large(self):
         model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
@@ -402,7 +402,7 @@ class NCSNppModelTests(ModelTesterMixin, unittest.TestCase):
         expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
         # fmt: on
 
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
 
     def test_forward_with_norm_groups(self):
         # not required for this model
@@ -464,7 +464,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -490,7 +490,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -515,7 +515,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -541,7 +541,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -566,7 +566,7 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -592,4 +592,4 @@ class UNet2DConditionModelIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
@@ -20,7 +20,7 @@ import torch
 
 from diffusers import AutoencoderKL
 from diffusers.modeling_utils import ModelMixin
-from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_device
+from diffusers.utils import floats_tensor, require_torch_gpu, slow, torch_all_close, torch_device
 from parameterized import parameterized
 
 from ..test_modeling_common import ModelTesterMixin
@@ -131,7 +131,7 @@ class AutoencoderKLTests(ModelTesterMixin, unittest.TestCase):
             [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
         )
 
-        self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-2))
+        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
 
 
 @slow
@@ -185,7 +185,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -209,7 +209,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -231,7 +231,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -254,7 +254,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -276,7 +276,7 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)
 
     @parameterized.expand(
         [
@@ -300,4 +300,4 @@ class AutoencoderKLIntegrationTests(unittest.TestCase):
         output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
         expected_output_slice = torch.tensor(expected_slice)
 
-        assert torch.allclose(output_slice, expected_output_slice, atol=1e-4)
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-4)