Skip to content

Commit 1d0060e

Browse files
committed
update class docstrings
1 parent 68cee6a commit 1d0060e

File tree

7 files changed

+39
-15
lines changed

7 files changed

+39
-15
lines changed

src/diffusers/schedulers/scheduling_ddim.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -70,10 +70,12 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
7070
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
7171
trained_betas (`np.ndarray`, optional): TODO
7272
timestep_values (`np.ndarray`, optional): TODO
73-
clip_sample (`bool`, default `True`): option to clip predicted sample between -1 and 1 for numerical stability
73+
clip_sample (`bool`, default `True`):
74+
option to clip predicted sample between -1 and 1 for numerical stability.
7475
set_alpha_to_one (`bool`, default `True`):
75-
if alpha for final step is 1 or the final alpha of the "non-previous" one
76-
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
76+
if alpha for final step is 1 or the final alpha of the "non-previous" one.
77+
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays.
78+
7779
"""
7880

7981
@register_to_config

src/diffusers/schedulers/scheduling_ddpm.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -67,12 +67,14 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
6767
beta_schedule (`str`):
6868
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
6969
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
70-
trained_betas (): TODO
70+
trained_betas (`np.ndarray`, optional): TODO
7171
variance_type (`str`):
7272
options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
7373
`fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
74-
clip_sample (`bool`, default `True`): option to clip predicted sample between -1 and 1 for numerical stability
75-
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
74+
clip_sample (`bool`, default `True`):
75+
option to clip predicted sample between -1 and 1 for numerical stability.
76+
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays.
77+
7678
"""
7779

7880
@register_to_config

src/diffusers/schedulers/scheduling_karras_ve.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
6565
A reasonable range is [0, 10].
6666
s_max (`float`): the end value of the sigma range where we add noise.
6767
A reasonable range is [0.2, 80].
68-
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
68+
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays.
6969
7070
"""
7171

src/diffusers/schedulers/scheduling_lms_discrete.py

Lines changed: 22 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,26 @@
2424

2525

2626
class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin):
27+
"""
28+
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
29+
Katherine Crowson:
30+
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
31+
32+
Args:
33+
num_train_timesteps (`int`): number of diffusion steps used to train the model.
34+
beta_start (`float`): the starting `beta` value of inference.
35+
beta_end (`float`): the final `beta` value.
36+
beta_schedule (`str`):
37+
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
38+
`linear` or `scaled_linear`.
39+
trained_betas (`np.ndarray`, optional): TODO
40+
options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
41+
`fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
42+
timestep_values (`np.ndarry`, optional): TODO
43+
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays.
44+
45+
"""
46+
2747
@register_to_config
2848
def __init__(
2949
self,
@@ -35,12 +55,8 @@ def __init__(
3555
timestep_values: Optional[np.ndarray] = None,
3656
tensor_format: str = "pt",
3757
):
38-
"""
39-
Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by
40-
Katherine Crowson:
41-
https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181
42-
"""
43-
58+
if trained_betas is not None:
59+
self.betas = np.asarray(trained_betas)
4460
if beta_schedule == "linear":
4561
self.betas = np.linspace(beta_start, beta_end, num_train_timesteps, dtype=np.float32)
4662
elif beta_schedule == "scaled_linear":

src/diffusers/schedulers/scheduling_pndm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,11 +67,12 @@ class PNDMScheduler(SchedulerMixin, ConfigMixin):
6767
beta_schedule (`str`):
6868
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
6969
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
70-
trained_betas (): TODO
70+
trained_betas (`np.ndarray`, optional): TODO
7171
tensor_format (`str`): whether the scheduler expects pytorch or numpy arrays
7272
skip_prk_steps (`bool`):
7373
allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required
7474
before plms steps; defaults to `False`.
75+
7576
"""
7677

7778
@register_to_config

src/diffusers/schedulers/scheduling_sde_ve.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
5353
sigma_min (`float`):
5454
initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the
5555
distribution of the data.
56-
sigma_max (`float`): TODO
56+
sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model.
5757
sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to
5858
epsilon.
5959
correct_steps (`int`): number of correction steps performed on a produced sample.

src/diffusers/schedulers/scheduling_utils.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,9 @@ class SchedulerOutput(BaseOutput):
3838

3939

4040
class SchedulerMixin:
41+
"""
42+
Mixin containing common functions for the schedulers.
43+
"""
4144

4245
config_name = SCHEDULER_CONFIG_NAME
4346
ignore_for_config = ["tensor_format"]

0 commit comments

Comments
 (0)