@@ -1858,14 +1858,14 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
                 expected_slice = np.array(
                     [1.1078, 1.5803, 0.2773, -0.0589, -1.7928, -0.3665, -0.4695, -1.0727, -1.1601]
                 )
-                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
+                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-2

         test_callback_fn.has_been_called = False

         pipe = StableDiffusionPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
-        pipe.to(torch_device)
+        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()

@@ -1904,7 +1904,7 @@ def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
                 assert latents.shape == (1, 4, 64, 96)
                 latents_slice = latents[0, -3:, -3:, -1]
                 expected_slice = np.array([0.7071, 0.7831, 0.8300, 1.8140, 1.7840, 1.9402, 1.3651, 1.6590, 1.2828])
-                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
+                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-2

         test_callback_fn.has_been_called = False
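Context for the hunks above (not part of the diff): a minimal sketch of how a per-step callback with this (step, timestep, latents) signature is typically passed to the pipeline. The prompt, step count, and device string are illustrative assumptions, and the callback / callback_steps keyword arguments are assumed to be the step-callback interface of the StableDiffusionPipeline version these tests target.

import torch
from diffusers import StableDiffusionPipeline

def log_latents(step: int, timestep: int, latents: torch.FloatTensor) -> None:
    # Invoked by the pipeline during denoising; latents stay on the device.
    print(f"step={step} timestep={timestep} latents shape={tuple(latents.shape)}")

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")  # keep the returned pipeline, mirroring the `pipe = pipe.to(...)` change above

image = pipe(
    "a photo of an astronaut riding a horse",  # illustrative prompt, not from the diff
    num_inference_steps=5,
    callback=log_latents,   # assumed keyword for the per-step callback
    callback_steps=1,       # run the callback on every denoising step
).images[0]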