File tree: src/diffusers/pipelines/stable_diffusion
2 files changed: +4 −10 lines changed

@@ -304,12 +304,9 @@ def __call__(
         latents = init_latents

         t_start = max(num_inference_steps - init_timestep + offset, 0)
+        timesteps = self.scheduler.timesteps[t_start:]

-        # Some schedulers like PNDM have timesteps as arrays
-        # It's more optimzed to move all timesteps to correct device beforehand
-        timesteps_tensor = torch.tensor(self.scheduler.timesteps[t_start:], device=self.device)
-
-        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
+        for i, t in enumerate(self.progress_bar(timesteps)):
             t_index = t_start + i

             # expand the latents if we are doing classifier free guidance
@@ -342,12 +342,9 @@ def __call__(
         latents = init_latents

         t_start = max(num_inference_steps - init_timestep + offset, 0)
+        timesteps = self.scheduler.timesteps[t_start:]

-        # Some schedulers like PNDM have timesteps as arrays
-        # It's more optimzed to move all timesteps to correct device beforehand
-        timesteps_tensor = torch.tensor(self.scheduler.timesteps[t_start:], device=self.device)
-
-        for i, t in tqdm(enumerate(timesteps_tensor)):
+        for i, t in tqdm(enumerate(timesteps)):
             t_index = t_start + i
             # expand the latents if we are doing classifier free guidance
             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
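
For reference, a minimal sketch of the pattern the added lines in both hunks rely on: after `scheduler.set_timesteps(...)`, the img2img path simply iterates over the tail of `scheduler.timesteps` starting at `t_start`, without re-wrapping it in `torch.tensor(...)`. The scheduler choice and the `init_timestep`/`offset` values below are illustrative assumptions, not taken from this PR.

```python
from diffusers import PNDMScheduler  # assumed scheduler; any scheduler exposing .timesteps behaves similarly

scheduler = PNDMScheduler()
scheduler.set_timesteps(50)

num_inference_steps = 50
init_timestep = 40  # hypothetical: in the pipeline this is derived from the img2img `strength` argument
offset = 1          # hypothetical: some schedulers shift the schedule by a step offset

t_start = max(num_inference_steps - init_timestep + offset, 0)
timesteps = scheduler.timesteps[t_start:]  # slice of the precomputed schedule, used directly in the loop

for i, t in enumerate(timesteps):
    t_index = t_start + i
    # the real pipeline would run the UNet and scheduler.step(...) here for each timestep t
    print(t_index, t)
```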