 class PNDMPipeline(DiffusionPipeline):
+    r"""
+    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+    Parameters:
+        unet (:obj:`UNet2DModel`): U-Net architecture to denoise the encoded image latents.
+        scheduler ([`SchedulerMixin`]):
+            The `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
+    """
+
     unet: UNet2DModel
     scheduler: PNDMScheduler
@@ -43,6 +53,23 @@ def __call__(
         return_dict: bool = True,
         **kwargs,
     ) -> Union[ImagePipelineOutput, Tuple]:
+ r"""
57
+ Args:
58
+ batch_size (:obj:`int`, `optional`, defaults to 1): The number of images to generate.
59
+ num_inference_steps (:
60
+ obj:`int`, `optional`, defaults to 50): The number of denoising steps. More denoising steps usually
61
+ lead to a higher quality image at the expense of slower inference.
62
+ generator (:
63
+ obj:`torch.Generator`, `optional`): A [torch
64
+ generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
65
+ deterministic.
66
+ output_type (:
67
+ obj:`str`, `optional`, defaults to :obj:`"pil"`): The output format of the generate image. Choose
68
+ between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
69
+ return_dict (:
70
+ obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to return a
71
+ [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
72
+ """
         # For more information on the sampling method you can take a look at Algorithm 2 of
         # the official paper: https://arxiv.org/pdf/2202.09778.pdf
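As a quick sanity check on the `Parameters` block documented above, here is a minimal sketch of assembling the pipeline from its two components. The `sample_size=32` U-Net configuration is an arbitrary toy choice, not something taken from this diff.

```python
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

# Toy U-Net configuration; any compatible U-Net would do here.
unet = UNet2DModel(sample_size=32)
# Default PNDM noise schedule.
scheduler = PNDMScheduler()

# The pipeline pairs the documented `unet` and `scheduler` components.
pipeline = PNDMPipeline(unet=unet, scheduler=scheduler)
```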
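And a hedged sketch of calling the pipeline with the arguments documented in the new `__call__` docstring. The checkpoint path is a placeholder, and accessing the generated images via `.images` on the returned `ImagePipelineOutput` is an assumption not shown in this diff.

```python
import torch
from diffusers import PNDMPipeline

# Placeholder path; substitute a real PNDM-compatible checkpoint.
pipeline = PNDMPipeline.from_pretrained("path/to/pndm-checkpoint")

# A seeded generator makes the sampling deterministic, as the docstring notes.
generator = torch.Generator().manual_seed(0)

output = pipeline(
    batch_size=1,
    num_inference_steps=50,  # documented default
    generator=generator,
    output_type="pil",
    return_dict=True,
)

# Assumes ImagePipelineOutput exposes the generated PIL images as `.images`.
output.images[0].save("pndm_sample.png")
```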