speed up Shap-E fast test (#5686)
skip rendering

Co-authored-by: yiyixuxu <yixu310@gmail.com>
@@ -160,7 +160,7 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             "generator": generator,
             "num_inference_steps": 1,
             "frame_size": 32,
-            "output_type": "np",
+            "output_type": "latent",
         }
         return inputs
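Note (illustrative, not part of the diff): with output_type="np" the Shap-E pipeline decodes the latents and renders a stack of frames, while "latent" returns the raw latents and skips that rendering work, which is what makes the fast test faster. A minimal sketch of the faster call path, assuming the public "openai/shap-e" checkpoint and the standard ShapEPipeline call signature; the prompt string is a placeholder, not taken from the test file:

    import torch
    from diffusers import ShapEPipeline

    # Hedged sketch: checkpoint name and prompt are assumptions for illustration.
    pipe = ShapEPipeline.from_pretrained("openai/shap-e")
    generator = torch.Generator().manual_seed(0)

    out = pipe(
        "a placeholder prompt",
        generator=generator,
        num_inference_steps=1,
        frame_size=32,
        output_type="latent",  # return raw latents instead of rendered frames
    )
    latent = out.images[0]  # a torch tensor of latents; no rendering performed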
@@ -176,24 +176,12 @@ class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         output = pipe(**self.get_dummy_inputs(device))
         image = output.images[0]
-        image_slice = image[0, -3:, -3:, -1]
+        image = image.cpu().numpy()
+        image_slice = image[-3:, -3:]

-        assert image.shape == (20, 32, 32, 3)
+        assert image.shape == (32, 16)

-        expected_slice = np.array(
-            [
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-            ]
-        )
+        expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103])
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

     def test_inference_batch_consistent(self):
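For reference (a self-contained stand-in, not part of the diff): under the latent output path the first element of output.images is a torch tensor, so the test moves it to CPU numpy before slicing, and the dummy components are expected to produce a (32, 16) latent instead of 20 rendered 32x32 RGB frames. The (32, 16) shape below is taken from the test's assertion; the zero tensor is only a placeholder:

    import numpy as np
    import torch

    latent = torch.zeros(32, 16)    # stand-in for output.images[0] with output_type="latent"
    image = latent.cpu().numpy()    # move to host before numpy comparisons
    image_slice = image[-3:, -3:]   # bottom-right 3x3 corner, 9 values

    assert image.shape == (32, 16)
    assert image_slice.flatten().shape == (9,)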
@@ -181,7 +181,7 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
             "generator": generator,
             "num_inference_steps": 1,
             "frame_size": 32,
-            "output_type": "np",
+            "output_type": "latent",
         }
         return inputs
@@ -197,22 +197,12 @@ class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
         output = pipe(**self.get_dummy_inputs(device))
         image = output.images[0]
-        image_slice = image[0, -3:, -3:, -1]
+        image_slice = image[-3:, -3:].cpu().numpy()

-        assert image.shape == (20, 32, 32, 3)
+        assert image.shape == (32, 16)

-        expected_slice = np.array(
-            [
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-                0.00039216,
-            ]
-        )
+        expected_slice = np.array(
+            [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
+        )

         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
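Side note on the img2img variant (illustrative only): it slices the latent tensor first and then converts, whereas the text-to-3D test above converts the whole tensor first; both give the same 3x3 corner, as this small check shows:

    import numpy as np
    import torch

    latent = torch.arange(32 * 16, dtype=torch.float32).reshape(32, 16)
    slice_then_convert = latent[-3:, -3:].cpu().numpy()  # img2img test style
    convert_then_slice = latent.cpu().numpy()[-3:, -3:]  # text-to-3D test style
    assert np.array_equal(slice_then_convert, convert_then_slice)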
@@ -493,7 +493,7 @@ class PipelineTesterMixin:
         assert output_batch[0].shape[0] == batch_size

-        max_diff = np.abs(output_batch[0][0] - output[0][0]).max()
+        max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

     def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
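The common tests now route both operands through to_np because latent outputs are torch tensors rather than numpy arrays. A minimal sketch of what such a helper might look like, assuming the conventions of tests/pipelines/test_pipelines_common.py (the actual helper there may differ):

    import numpy as np
    import torch

    def to_np(tensor):
        # Convert torch tensors to CPU numpy; leave numpy arrays untouched.
        if isinstance(tensor, torch.Tensor):
            tensor = tensor.detach().cpu().numpy()
        return tensor

    # With latent outputs either operand may be a tensor; to_np makes the
    # np.abs(...).max() comparison valid in both cases.
    max_diff = np.abs(to_np(torch.ones(3)) - to_np(np.ones(3))).max()
    assert max_diff == 0.0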
@@ -702,7 +702,7 @@ class PipelineTesterMixin:
         self.assertLess(max_diff, expected_max_diff, "Attention slicing should not affect the inference results")

         if test_mean_pixel_difference:
-            assert_mean_pixel_difference(output_with_slicing[0], output_without_slicing[0])
+            assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))

     @unittest.skipIf(
         torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),