diff --git a/src/diffusers/pipeline_utils.py b/src/diffusers/pipeline_utils.py
index 5e7cf2d7a555..35996d65072c 100644
--- a/src/diffusers/pipeline_utils.py
+++ b/src/diffusers/pipeline_utils.py
@@ -169,10 +169,12 @@ def to(self, torch_device: Optional[Union[str, torch.device]] = None):
             module = getattr(self, name)
             if isinstance(module, torch.nn.Module):
                 if module.dtype == torch.float16 and str(torch_device) in ["cpu", "mps"]:
-                    raise ValueError(
-                        "Pipelines loaded with `torch_dtype=torch.float16` cannot be moved to `cpu` or `mps` "
-                        "due to the lack of support for `float16` operations on those devices in PyTorch. "
-                        "Please remove the `torch_dtype=torch.float16` argument, or use a `cuda` device."
+                    logger.warning(
+                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` or `mps` device. It"
+                        " is not recommended to move them to `cpu` or `mps`, as running them will fail due to the"
+                        " lack of support for `float16` operations on those devices in PyTorch. Please make sure to"
+                        " use a `cuda` device to run the pipeline in inference, or remove the"
+                        " `torch_dtype=torch.float16` argument."
                     )
                 module.to(torch_device)
         return self
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index e961378a9a58..567699986eb3 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -247,17 +247,6 @@ def to(self, device):
 
         return extract
 
-    def test_pipeline_fp16_cpu_error(self):
-        model = self.dummy_uncond_unet
-        scheduler = DDPMScheduler(num_train_timesteps=10)
-        pipe = DDIMPipeline(model.half(), scheduler)
-
-        if str(torch_device) in ["cpu", "mps"]:
-            self.assertRaises(ValueError, pipe.to, torch_device)
-        else:
-            # moving the pipeline to GPU should work
-            pipe.to(torch_device)
-
     def test_ddim(self):
         unet = self.dummy_uncond_unet
         scheduler = DDIMScheduler()