# and https://github.com/hojonathanho/diffusion

import math
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from .scheduling_utils import SchedulerMixin, SchedulerOutput


-def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
+def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta: float = 0.999) -> np.ndarray:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].
@@ -43,14 +43,14 @@ def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
        betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
    """

-    def alpha_bar(time_step):
+    def calculate_alpha_bar(time_step: float) -> float:
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

-    betas = []
-    for i in range(num_diffusion_timesteps):
-        t1 = i / num_diffusion_timesteps
-        t2 = (i + 1) / num_diffusion_timesteps
-        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+    betas: List[float] = []
+    for diffusion_timestep in range(num_diffusion_timesteps):
+        lower_timestep = diffusion_timestep / num_diffusion_timesteps
+        upper_timestep = (diffusion_timestep + 1) / num_diffusion_timesteps
+        betas.append(min(1 - calculate_alpha_bar(upper_timestep) / calculate_alpha_bar(lower_timestep), max_beta))
    return np.array(betas, dtype=np.float32)
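In closed form, the hunk above computes beta_i = 1 - alpha_bar((i + 1) / N) / alpha_bar(i / N) with alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2, clipped at max_beta. A minimal standalone sketch of the same schedule (the name cosine_betas is hypothetical) with two sanity checks:

import math

import numpy as np


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    # alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2, as in the hunk above
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return np.array(
        [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)],
        dtype=np.float32,
    )


betas = cosine_betas(1000)
assert betas[0] < betas[-1]                # noise grows toward the end of the chain
assert np.cumprod(1.0 - betas)[-1] < 1e-3  # almost no signal survives the final step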
@@ -96,7 +96,7 @@ def __init__(
        tensor_format: str = "pt",
    ):
        if trained_betas is not None:
-            self.betas = np.asarray(trained_betas)
+            self.betas: np.ndarray = np.asarray(trained_betas)
        if beta_schedule == "linear":
            self.betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
        elif beta_schedule == "scaled_linear":
@@ -108,8 +108,8 @@ def __init__(
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

-        self.alphas = 1.0 - self.betas
-        self.alphas_cumprod = np.cumprod(self.alphas, axis=0)
+        self.alphas: np.ndarray = 1.0 - self.betas
+        self.alphas_cumprod: np.ndarray = np.cumprod(self.alphas, axis=0)

        # At every step in ddim, we are looking into the previous alphas_cumprod
        # For the final step, there is no previous alphas_cumprod because we are already at 0
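The beta_schedule branches differ only in how self.betas is filled; everything the sampler consumes is derived from the cumulative product afterwards. A hedged standalone sketch of the two linspace variants (the scaled_linear body falls between the hunks, so that line follows the Stable Diffusion convention and is an assumption, not a quote of this file):

import numpy as np

num_train_timesteps, beta_start, beta_end = 1000, 0.0001, 0.02

# "linear": evenly spaced betas, exactly as in the hunk above
linear_betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)

# "scaled_linear": a linspace over sqrt-betas, squared afterwards (assumption,
# see lead-in above)
scaled_betas = np.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=np.float32) ** 2

# Whichever branch runs, the derived quantity is the same:
alphas_cumprod = np.cumprod(1.0 - linear_betas, axis=0)  # alpha_bar_t, monotonically decreasing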
@@ -118,10 +118,10 @@ def __init__(
        self.final_alpha_cumprod = np.array(1.0) if set_alpha_to_one else self.alphas_cumprod[0]

        # setable values
-        self.num_inference_steps = None
-        self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy()
+        self.num_inference_steps: int = 0
+        self.timesteps: np.ndarray = np.arange(0, num_train_timesteps)[::-1].copy()

-        self.tensor_format = tensor_format
+        self.tensor_format: str = tensor_format
        self.set_format(tensor_format=tensor_format)

    def _get_variance(self, timestep, prev_timestep):
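_get_variance appears above only as a context line. For reference, DDIM's posterior variance (Song et al. 2020, Eq. 16) is sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev), where alpha_bar_prev falls back to final_alpha_cumprod when prev_timestep < 0 — exactly the case the set_alpha_to_one comment above prepares for. A standalone sketch of a function computing it (a reconstruction, not a quote of this file's body):

import numpy as np


def ddim_variance(alphas_cumprod: np.ndarray, final_alpha_cumprod: np.ndarray,
                  timestep: int, prev_timestep: int):
    # sigma_t^2 = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev)
    alpha_prod_t = alphas_cumprod[timestep]
    alpha_prod_t_prev = alphas_cumprod[prev_timestep] if prev_timestep >= 0 else final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    return (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)


betas = np.linspace(0.0001, 0.02, 1000, dtype=np.float32)
abar = np.cumprod(1.0 - betas)
print(ddim_variance(abar, np.array(1.0), timestep=999, prev_timestep=979))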
@@ -134,7 +134,7 @@ def _get_variance(self, timestep, prev_timestep):

        return variance

-    def set_timesteps(self, num_inference_steps: int, offset: int = 0):
+    def set_timesteps(self, num_inference_steps: int, offset: int = 0) -> None:
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.