1 parent d5a8282 commit 83233e0
pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -87,8 +87,8 @@ def disable_attention_slicing(self):
         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
         back to computing attention in one step.
         """
-        # set slice_size = `None` to disable `set_attention_slice`
-        self.enable_attention_slice(None)
+        # set slice_size = `None` to disable `attention slicing`
+        self.enable_attention_slicing(None)
 
     @torch.no_grad()
     def __call__(
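
For context, the commit fixes `disable_attention_slicing` so that it calls the actual `enable_attention_slicing` method with `slice_size=None` instead of the non-existent `enable_attention_slice`. A minimal usage sketch is below; the model id, dtype, and device are assumptions for illustration and are written against the public `StableDiffusionPipeline` API rather than being part of this commit.

```python
import torch
from diffusers import StableDiffusionPipeline

# Hypothetical checkpoint and device choice, not part of this commit.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Compute attention in slices to lower peak memory during inference.
pipe.enable_attention_slicing()

# With the fix, this delegates to enable_attention_slicing(None),
# restoring attention computation in a single step.
pipe.disable_attention_slicing()

image = pipe("a photo of an astronaut riding a horse").images[0]
```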