From 0f91f2f6fc697f01ca6da6724e2b3b5600b56a9b Mon Sep 17 00:00:00 2001
From: jiqing-feng
Date: Fri, 6 Jun 2025 11:44:00 +0800
Subject: [PATCH] use deterministic to get stable result (#11663)

* use deterministic to get stable result

Signed-off-by: jiqing-feng

* add deterministic for int8 test

Signed-off-by: jiqing-feng

---------

Signed-off-by: jiqing-feng
---
 tests/quantization/bnb/test_4bit.py       | 1 +
 tests/quantization/bnb/test_mixed_int8.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/tests/quantization/bnb/test_4bit.py b/tests/quantization/bnb/test_4bit.py
index acc6d30b79..ac1b0cf3ce 100644
--- a/tests/quantization/bnb/test_4bit.py
+++ b/tests/quantization/bnb/test_4bit.py
@@ -476,6 +476,7 @@ class SlowBnb4BitTests(Base4bitTests):
         r"""
         Test that loading the model and unquantize it produce correct results.
         """
+        torch.use_deterministic_algorithms(True)
         self.pipeline_4bit.transformer.dequantize()
         output = self.pipeline_4bit(
             prompt=self.prompt,
diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py
index 7abb907ff9..98575b86cd 100644
--- a/tests/quantization/bnb/test_mixed_int8.py
+++ b/tests/quantization/bnb/test_mixed_int8.py
@@ -478,6 +478,7 @@ class SlowBnb8bitTests(Base8bitTests):
         r"""
         Test that loading the model and unquantize it produce correct results.
         """
+        torch.use_deterministic_algorithms(True)
         self.pipeline_8bit.transformer.dequantize()
         output = self.pipeline_8bit(
             prompt=self.prompt,
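
Note (not part of the patch): a minimal sketch of what `torch.use_deterministic_algorithms(True)` does in isolation, for readers unfamiliar with the flag. The seed, tensor shapes, and the `CUBLAS_WORKSPACE_CONFIG` handling here are illustrative assumptions about a standalone run, not something introduced by this change; the env-var requirement is a general PyTorch/cuBLAS constraint on CUDA.

```python
import os
import torch

# Assumption for this sketch: on CUDA >= 10.2, deterministic cuBLAS kernels
# require this workspace config, otherwise use_deterministic_algorithms(True)
# can raise a RuntimeError for some matmul/conv ops. It is harmless on CPU.
os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")

torch.manual_seed(0)
torch.use_deterministic_algorithms(True)

x = torch.randn(4, 8)
w = torch.randn(8, 8)

# With deterministic algorithms enabled, repeating the same op on the same
# device and seed yields bit-identical results, which is the "stable result"
# the patch relies on when comparing output slices in the slow bnb tests.
out1 = x @ w
out2 = x @ w
assert torch.equal(out1, out2)
```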