From 729dc522811ff0f5be08cf7fff0f5c43c9d4e9b9 Mon Sep 17 00:00:00 2001
From: apolinario
Date: Fri, 7 Oct 2022 09:34:07 +0200
Subject: [PATCH 1/5] Swap fp16 error to warning

Also remove the associated test
---
 src/diffusers/pipeline_utils.py |  8 ++++----
 tests/test_pipelines.py         | 11 -----------
 2 files changed, 4 insertions(+), 15 deletions(-)

diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index 5e7cf2d7a555..cc9ac5158011 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -169,10 +169,10 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             module = getattr(self, name)
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
-                    raise ValueError(
-                        "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
-                        "due to the lack of support for `float16` operations on those devices in PyTorch. "
-                        "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device."
+                    logger.warn(
+                    "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
+                    "due to the lack of support for `float16` operations on those devices in PyTorch. "
+                    "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
                     )
                 module.to(torch_device)
         return self
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index e961378a9a58..567699986eb3 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -247,17 +247,6 @@ def to(self, device):
 
         return extract
 
-    def test_pipeline_fp16_cpu_error(self):
-        model = self.dummy_uncond_unet
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        pipe = DDIMPipeline(model.half(), scheduler)
-
-        if str(torch_device) in ["cpu", "mps"]:
-            self.assertRaises(ValueError, pipe.to, torch_device)
-        else:
-            # moving the pipeline to GPU should work
-            pipe.to(torch_device)
-
     def test_ddim(self):
         unet = self.dummy_uncond_unet
         scheduler = DDIMScheduler()

From f7dc386cab50305e92a1a3641cfe2453ede1f6e6 Mon Sep 17 00:00:00 2001
From: apolinario
Date: Fri, 7 Oct 2022 09:36:33 +0200
Subject: [PATCH 2/5] Formatting

---
 src/diffusers/pipeline_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index cc9ac5158011..b2ad577150b6 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -170,9 +170,9 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
                     logger.warn(
-                    "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
-                    "due to the lack of support for `float16` operations on those devices in PyTorch. "
-                    "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
+                        "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
+                        "due to the lack of support for `float16` operations on those devices in PyTorch. "
+                        "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
                     )
                 module.to(torch_device)
         return self

From e6a06de7f2591418eaea41b9f9b9da8b3b3dc420 Mon Sep 17 00:00:00 2001
From: apolinario
Date: Fri, 7 Oct 2022 10:07:09 +0200
Subject: [PATCH 3/5] warn -> warning

---
 src/diffusers/pipeline_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index b2ad577150b6..e0873c9f95a4 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -169,7 +169,7 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             module = getattr(self, name)
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
-                    logger.warn(
+                    logger.warning(
                         "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
                         "due to the lack of support for `float16` operations on those devices in PyTorch. "
                         "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."

From 07f1526989fe0a90d8a8df538591d6bf4946d3f7 Mon Sep 17 00:00:00 2001
From: apolinario
Date: Fri, 7 Oct 2022 10:17:18 +0200
Subject: [PATCH 4/5] Update src/diffusers/pipeline_utils.py

Co-authored-by: Patrick von Platen
---
 src/diffusers/pipeline_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index e0873c9f95a4..67e9dd3a3870 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -170,7 +170,7 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
                     logger.warning(
-                        "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
+                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` or `mps` device. It is not recommended to move them to `cpu` or `mps` as running them will fail. Please make sure to use a `cuda` device to run the pipeline in inference. "
                         "due to the lack of support for `float16` operations on those devices in PyTorch. "
                         "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
                     )

From e17e7fa5dce683a65d57fb41f16846e97982591c Mon Sep 17 00:00:00 2001
From: apolinario
Date: Fri, 7 Oct 2022 10:18:39 +0200
Subject: [PATCH 5/5] make style

---
 src/diffusers/pipeline_utils.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index 67e9dd3a3870..35996d65072c 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -170,9 +170,11 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
                     logger.warning(
-                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` or `mps` device. It is not recommended to move them to `cpu` or `mps` as running them will fail. Please make sure to use a `cuda` device to run the pipeline in inference. "
-                        "due to the lack of support for `float16` operations on those devices in PyTorch. "
-                        "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
+                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` or `mps` device. It"
+                        " is not recommended to move them to `cpu` or `mps` as running them will fail. Please make"
+                        " sure to use a `cuda` device to run the pipeline in inference. due to the lack of support for"
+                        " `float16` operations on those devices in PyTorch. Please remove the"
+                        " `torch_dtype=torch.float16` argument, or use a `cuda` device to run inference."
                     )
                 module.to(torch_device)
         return self