mirror of https://github.com/huggingface/diffusers.git synced 2026-01-27 17:22:53 +03:00

enable deterministic in bnb 4 bit tests (#11738)

* enable deterministic in bnb 4 bit tests

Signed-off-by: jiqing-feng <jiqing.feng@intel.com>

* fix 8bit test

Signed-off-by: jiqing-feng <jiqing.feng@intel.com>

---------

Signed-off-by: jiqing-feng <jiqing.feng@intel.com>
Author: jiqing-feng <jiqing.feng@intel.com>
Date: 2025-06-23 10:47:36 +08:00 (committed by GitHub)
Parent: 7fc53b5d66
Commit: ee40088fe5
2 changed files with 8 additions and 2 deletions


@@ -96,6 +96,10 @@ class Base4bitTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0
 
+    @classmethod
+    def setUpClass(cls):
+        torch.use_deterministic_algorithms(True)
+
     def get_dummy_inputs(self):
         prompt_embeds = load_pt(
             "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
@@ -480,7 +484,6 @@ class SlowBnb4BitTests(Base4bitTests):
         r"""
         Test that loading the model and unquantize it produce correct results.
         """
-        torch.use_deterministic_algorithms(True)
         self.pipeline_4bit.transformer.dequantize()
         output = self.pipeline_4bit(
             prompt=self.prompt,
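
The hunks above move the torch.use_deterministic_algorithms(True) call out of the individual test and into setUpClass, so every test in the class runs with deterministic kernels. Below is a minimal, self-contained sketch of that pattern; the class and test names are illustrative and not taken from the diffusers test suite.

import unittest

import torch


class DeterministicAlgoTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Runs once before any test in this class, so every test method sees
        # deterministic kernels; ops without a deterministic implementation
        # raise instead of silently varying between runs.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        # Restore the default so unrelated test classes are not affected.
        torch.use_deterministic_algorithms(False)

    def test_matmul_is_reproducible(self):
        torch.manual_seed(0)
        first = torch.randn(8, 8) @ torch.randn(8, 8)
        torch.manual_seed(0)
        second = torch.randn(8, 8) @ torch.randn(8, 8)
        self.assertTrue(torch.equal(first, second))


if __name__ == "__main__":
    unittest.main()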


@@ -97,6 +97,10 @@ class Base8bitTests(unittest.TestCase):
     num_inference_steps = 10
     seed = 0
 
+    @classmethod
+    def setUpClass(cls):
+        torch.use_deterministic_algorithms(True)
+
     def get_dummy_inputs(self):
         prompt_embeds = load_pt(
             "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
@@ -485,7 +489,6 @@ class SlowBnb8bitTests(Base8bitTests):
         r"""
        Test that loading the model and unquantize it produce correct results.
         """
-        torch.use_deterministic_algorithms(True)
         self.pipeline_8bit.transformer.dequantize()
         output = self.pipeline_8bit(
             prompt=self.prompt,
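
One caveat when torch.use_deterministic_algorithms(True) is active on CUDA: PyTorch documents that deterministic cuBLAS matmuls also require the CUBLAS_WORKSPACE_CONFIG environment variable (":4096:8" or ":16:8"); this diff does not show whether the bnb test environment sets it. A hedged sketch of how a test harness could do so, not part of this commit:

import os

# Must be set before the first cuBLAS call, so export it before torch runs any
# CUDA work (hypothetical harness code, not from the diffusers repo).
os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")

import torch

torch.use_deterministic_algorithms(True)

if torch.cuda.is_available():
    # This matmul now selects a deterministic cuBLAS algorithm.
    x = torch.randn(4, 4, device="cuda")
    print(x @ x)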