From c1b2a89e349b3b632c1f32052331e549516f87fc Mon Sep 17 00:00:00 2001 From: yunseong Cho Date: Thu, 2 May 2024 13:59:55 +0900 Subject: [PATCH 01/14] Fix key error for dictionary with randomized order in convert_ldm_unet_checkpoint (#7680) fix key error for different order Co-authored-by: yunseong Co-authored-by: Dhruv Nair --- src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py index f04a21ef4857..b4b1b885dd3c 100644 --- a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +++ b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py @@ -557,7 +557,7 @@ def convert_ldm_unet_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) - output_block_list = {k: sorted(v) for k, v in output_block_list.items()} + output_block_list = {k: sorted(v) for k, v in sorted(output_block_list.items())} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ From 3ffa7b46e5d896dc35264b50325460f554556a93 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 2 May 2024 13:08:57 +0530 Subject: [PATCH 02/14] Fix hanging pipeline fetching (#7837) update --- .github/workflows/push_tests.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index d5e1c4739497..573802081704 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -21,7 +21,9 @@ env: jobs: setup_torch_cuda_pipeline_matrix: name: Setup Torch Pipelines CUDA Slow Tests Matrix - runs-on: diffusers/diffusers-pytorch-cpu + runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ] + container: + image: diffusers/diffusers-pytorch-cpu outputs: pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }} steps: From 03ca11318e61de5f41c4406ac455d926b32e5714 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 2 May 2024 13:15:38 +0530 Subject: [PATCH 03/14] Update download diff format tests (#7831) update Co-authored-by: Sayak Paul --- .../stable_diffusion/test_stable_diffusion.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index 145e0012f8e9..137f1e696d93 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -1257,8 +1257,8 @@ def tearDown(self): def test_download_from_hub(self): ckpt_paths = [ - "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt", - "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix_base.ckpt", + "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", + "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors", ] for ckpt_path in ckpt_paths: @@ -1271,7 +1271,7 @@ def test_download_from_hub(self): assert image_out.shape == (512, 512, 3) def test_download_local(self): - ckpt_filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.ckpt") + 
ckpt_filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.safetensors") config_filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-inference.yaml") pipe = StableDiffusionPipeline.from_single_file( @@ -1285,7 +1285,7 @@ def test_download_local(self): assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): - ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt" + ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" sf_pipe = StableDiffusionPipeline.from_single_file(ckpt_path) sf_pipe.scheduler = DDIMScheduler.from_config(sf_pipe.scheduler.config) @@ -1310,7 +1310,7 @@ def test_download_ckpt_diff_format_is_same(self): def test_single_file_component_configs(self): pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") - ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt" + ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" single_file_pipe = StableDiffusionPipeline.from_single_file(ckpt_path, load_safety_checker=True) for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items(): From 3c85a57297b22df8921bae39c0a2e3982ee69de7 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 2 May 2024 14:03:35 +0530 Subject: [PATCH 04/14] Update CI cache (#7832) update Co-authored-by: Sayak Paul --- .github/workflows/push_tests.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index 573802081704..b90ac8da2d69 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -62,7 +62,7 @@ jobs: runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 --privileged + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 --privileged steps: - name: Checkout diffusers uses: actions/checkout@v3 @@ -119,7 +119,7 @@ jobs: runs-on: docker-gpu container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 defaults: run: shell: bash @@ -171,7 +171,7 @@ jobs: runs-on: docker-gpu container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 defaults: run: shell: bash @@ -221,7 +221,7 @@ jobs: runs-on: docker-tpu container: image: diffusers/diffusers-flax-tpu - options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged defaults: run: shell: bash @@ -268,7 +268,7 @@ jobs: runs-on: docker-gpu container: image: diffusers/diffusers-onnxruntime-cuda - options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 defaults: run: shell: bash @@ -317,7 +317,7 @@ jobs: container: image: diffusers/diffusers-pytorch-compile-cuda - options: --gpus 0 --shm-size "16gb" 
--ipc host -v /mnt/hf_cache:/mnt/cache/ + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: - name: Checkout diffusers @@ -358,7 +358,7 @@ jobs: container: image: diffusers/diffusers-pytorch-xformers-cuda - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: - name: Checkout diffusers @@ -399,7 +399,7 @@ jobs: container: image: diffusers/diffusers-pytorch-cuda - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: - name: Checkout diffusers From 44ba90caff5355f1686a7bb79cacbf0a3b27eba5 Mon Sep 17 00:00:00 2001 From: Guillaume LEGENDRE Date: Thu, 2 May 2024 14:53:38 +0200 Subject: [PATCH 05/14] move to new runners (#7839) --- .github/workflows/pr_test_fetcher.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr_test_fetcher.yml b/.github/workflows/pr_test_fetcher.yml index 4dbb118c6092..7bbaaf2240a2 100644 --- a/.github/workflows/pr_test_fetcher.yml +++ b/.github/workflows/pr_test_fetcher.yml @@ -15,7 +15,7 @@ concurrency: jobs: setup_pr_tests: name: Setup PR Tests - runs-on: docker-cpu + runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ] container: image: diffusers/diffusers-pytorch-cpu options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ @@ -73,7 +73,7 @@ jobs: max-parallel: 2 matrix: modules: ${{ fromJson(needs.setup_pr_tests.outputs.matrix) }} - runs-on: docker-cpu + runs-on: [ self-hosted, intel-cpu, 8-cpu, ci ] container: image: diffusers/diffusers-pytorch-cpu options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ @@ -123,7 +123,7 @@ jobs: config: - name: Hub tests for models, schedulers, and pipelines framework: hub_tests_pytorch - runner: docker-cpu + runner: [ self-hosted, intel-cpu, 8-cpu, ci ] image: diffusers/diffusers-pytorch-cpu report: torch_hub From ce97d7e19b34b999f6bcc123a1437d283b4813aa Mon Sep 17 00:00:00 2001 From: Guillaume LEGENDRE Date: Thu, 2 May 2024 15:18:46 +0200 Subject: [PATCH 06/14] Change GPU Runners (#7840) * Move to new GPU Runners for slow tests * Move to new GPU Runners for nightly tests --- .github/workflows/nightly_tests.yml | 6 +++--- .github/workflows/push_tests.yml | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/nightly_tests.yml b/.github/workflows/nightly_tests.yml index d911dab4a306..2e9ac33d6b00 100644 --- a/.github/workflows/nightly_tests.yml +++ b/.github/workflows/nightly_tests.yml @@ -112,7 +112,7 @@ jobs: run_nightly_tests_for_other_torch_modules: name: Torch Non-Pipelines CUDA Nightly Tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 @@ -185,7 +185,7 @@ jobs: run_lora_nightly_tests: name: Nightly LoRA Tests with PEFT and TORCH - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --gpus 0 @@ -298,7 +298,7 @@ jobs: run_nightly_onnx_tests: name: Nightly ONNXRuntime CUDA tests on Ubuntu - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-onnxruntime-cuda options: --gpus 0 --shm-size "16gb" --ipc host -v 
/mnt/hf_cache:/mnt/cache/ diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index b90ac8da2d69..ae1dd537408f 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -116,7 +116,7 @@ jobs: torch_cuda_tests: name: Torch CUDA Tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 @@ -168,7 +168,7 @@ jobs: peft_cuda_tests: name: PEFT CUDA Tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 @@ -265,7 +265,7 @@ jobs: onnx_cuda_tests: name: ONNX CUDA Tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-onnxruntime-cuda options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 @@ -313,7 +313,7 @@ jobs: run_torch_compile_tests: name: PyTorch Compile CUDA tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-compile-cuda @@ -354,7 +354,7 @@ jobs: run_xformers_tests: name: PyTorch xformers CUDA tests - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-xformers-cuda @@ -395,7 +395,7 @@ jobs: run_examples_tests: name: Examples PyTorch CUDA tests on Ubuntu - runs-on: docker-gpu + runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda From 0d7c4790235ac00b4524b492bc2a680dcc5cf6b0 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 2 May 2024 20:36:47 +0530 Subject: [PATCH 07/14] Update deps for pipe test fetcher (#7838) update Co-authored-by: Sayak Paul --- .github/workflows/push_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index ae1dd537408f..163069be0912 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -37,8 +37,8 @@ jobs: python-version: "3.8" - name: Install dependencies run: | - pip install -e . 
- pip install huggingface_hub + python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" + python -m uv pip install -e [quality,test] - name: Fetch Pipeline Matrix id: fetch_pipeline_matrix run: | From fa489eaed6b1812c1a1b604bb5c11ea861523f45 Mon Sep 17 00:00:00 2001 From: Aritra Roy Gosthipaty Date: Fri, 3 May 2024 07:46:48 +0530 Subject: [PATCH 08/14] [Tests] reduce the model size in the blipdiffusion fast test (#7849) reducing model size --- .../blipdiffusion/test_blipdiffusion.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/tests/pipelines/blipdiffusion/test_blipdiffusion.py b/tests/pipelines/blipdiffusion/test_blipdiffusion.py index c5eaa3883d09..7e85cef65129 100644 --- a/tests/pipelines/blipdiffusion/test_blipdiffusion.py +++ b/tests/pipelines/blipdiffusion/test_blipdiffusion.py @@ -64,9 +64,9 @@ def get_dummy_components(self): torch.manual_seed(0) text_encoder_config = CLIPTextConfig( vocab_size=1000, - hidden_size=16, - intermediate_size=16, - projection_dim=16, + hidden_size=8, + intermediate_size=8, + projection_dim=8, num_hidden_layers=1, num_attention_heads=1, max_position_embeddings=77, @@ -78,17 +78,17 @@ def get_dummy_components(self): out_channels=4, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), - block_out_channels=(32,), + block_out_channels=(8,), + norm_num_groups=8, layers_per_block=1, act_fn="silu", latent_channels=4, - norm_num_groups=16, - sample_size=16, + sample_size=8, ) blip_vision_config = { - "hidden_size": 16, - "intermediate_size": 16, + "hidden_size": 8, + "intermediate_size": 8, "num_hidden_layers": 1, "num_attention_heads": 1, "image_size": 224, @@ -98,32 +98,32 @@ def get_dummy_components(self): blip_qformer_config = { "vocab_size": 1000, - "hidden_size": 16, + "hidden_size": 8, "num_hidden_layers": 1, "num_attention_heads": 1, - "intermediate_size": 16, + "intermediate_size": 8, "max_position_embeddings": 512, "cross_attention_frequency": 1, - "encoder_hidden_size": 16, + "encoder_hidden_size": 8, } qformer_config = Blip2Config( vision_config=blip_vision_config, qformer_config=blip_qformer_config, - num_query_tokens=16, + num_query_tokens=8, tokenizer="hf-internal-testing/tiny-random-bert", ) qformer = Blip2QFormerModel(qformer_config) unet = UNet2DConditionModel( - block_out_channels=(16, 32), - norm_num_groups=16, + block_out_channels=(8, 16), + norm_num_groups=8, layers_per_block=1, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=16, + cross_attention_dim=8, ) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") @@ -189,7 +189,9 @@ def test_blipdiffusion(self): assert image.shape == (1, 16, 16, 4) - expected_slice = np.array([0.7096, 0.5900, 0.6703, 0.4032, 0.7766, 0.3629, 0.5447, 0.4149, 0.8172]) + expected_slice = np.array( + [0.5329548, 0.8372512, 0.33269387, 0.82096875, 0.43657133, 0.3783, 0.5953028, 0.51934963, 0.42142007] + ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 From 6a479588db5201c1f4dfc4c01350f5c91d8d8c0d Mon Sep 17 00:00:00 2001 From: Lucain Date: Fri, 3 May 2024 08:42:57 +0200 Subject: [PATCH 09/14] Respect `resume_download` deprecation (#7843) * Deprecate resume_download * align docstring with transformers * style --------- Co-authored-by: Sayak Paul --- src/diffusers/configuration_utils.py | 8 +++---- src/diffusers/loaders/autoencoder.py | 8 +++---- 
src/diffusers/loaders/controlnet.py | 8 +++---- src/diffusers/loaders/ip_adapter.py | 8 +++---- src/diffusers/loaders/lora.py | 8 +++---- src/diffusers/loaders/single_file.py | 8 +++---- src/diffusers/loaders/single_file_utils.py | 2 +- src/diffusers/loaders/textual_inversion.py | 8 +++---- src/diffusers/loaders/unet.py | 16 ++++++------- src/diffusers/models/modeling_flax_utils.py | 8 +++---- src/diffusers/models/modeling_utils.py | 8 +++---- src/diffusers/pipelines/auto_pipeline.py | 24 +++++++++---------- .../pipelines/pipeline_flax_utils.py | 8 +++---- .../pipelines/pipeline_loading_utils.py | 4 ++-- src/diffusers/pipelines/pipeline_utils.py | 16 ++++++------- src/diffusers/schedulers/scheduling_utils.py | 6 ++--- .../schedulers/scheduling_utils_flax.py | 6 ++--- src/diffusers/utils/dynamic_modules_utils.py | 15 ++++++------ src/diffusers/utils/hub_utils.py | 2 +- 19 files changed, 86 insertions(+), 85 deletions(-) diff --git a/src/diffusers/configuration_utils.py b/src/diffusers/configuration_utils.py index 8f4a1958975d..7d76687a3d1e 100644 --- a/src/diffusers/configuration_utils.py +++ b/src/diffusers/configuration_utils.py @@ -310,9 +310,9 @@ def load_config( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -341,7 +341,7 @@ def load_config( """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) diff --git a/src/diffusers/loaders/autoencoder.py b/src/diffusers/loaders/autoencoder.py index b91d27f7d63e..36b022a26ec9 100644 --- a/src/diffusers/loaders/autoencoder.py +++ b/src/diffusers/loaders/autoencoder.py @@ -50,9 +50,9 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -99,7 +99,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): original_config_file = kwargs.pop("original_config_file", None) config_file = kwargs.pop("config_file", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/loaders/controlnet.py b/src/diffusers/loaders/controlnet.py index d323f60aa7ee..53b9802d390e 100644 --- a/src/diffusers/loaders/controlnet.py +++ b/src/diffusers/loaders/controlnet.py @@ -50,9 +50,9 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -89,7 +89,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): """ original_config_file = kwargs.pop("original_config_file", None) config_file = kwargs.pop("config_file", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py index 28a4334b199c..ef6a53e43196 100644 --- a/src/diffusers/loaders/ip_adapter.py +++ b/src/diffusers/loaders/ip_adapter.py @@ -90,9 +90,9 @@ def load_ip_adapter( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -135,7 +135,7 @@ def load_ip_adapter( # Load the main state dict first. 
cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/loaders/lora.py b/src/diffusers/loaders/lora.py index d69db5a83af1..2f525986a096 100644 --- a/src/diffusers/loaders/lora.py +++ b/src/diffusers/loaders/lora.py @@ -176,9 +176,9 @@ def lora_state_dict( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -208,7 +208,7 @@ def lora_state_dict( # UNet and text encoder or both. cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/loaders/single_file.py b/src/diffusers/loaders/single_file.py index 752ef18c7a0b..d8ff92d0b0ff 100644 --- a/src/diffusers/loaders/single_file.py +++ b/src/diffusers/loaders/single_file.py @@ -177,9 +177,9 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -244,7 +244,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): ``` """ original_config_file = kwargs.pop("original_config_file", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py index 5b01b8da2b1f..c23e594c3976 100644 --- a/src/diffusers/loaders/single_file_utils.py +++ b/src/diffusers/loaders/single_file_utils.py @@ -305,7 +305,7 @@ def fetch_ldm_config_and_checkpoint( pretrained_model_link_or_path, class_name, original_config_file=None, - resume_download=False, + resume_download=None, force_download=False, proxies=None, token=None, diff --git a/src/diffusers/loaders/textual_inversion.py b/src/diffusers/loaders/textual_inversion.py index 05ed64f5dcad..a9b9a9aae052 100644 --- a/src/diffusers/loaders/textual_inversion.py +++ b/src/diffusers/loaders/textual_inversion.py @@ -38,7 +38,7 @@ def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) @@ -308,9 +308,9 @@ def load_textual_inversion( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. diff --git a/src/diffusers/loaders/unet.py b/src/diffusers/loaders/unet.py index 3e74411865a3..5d5ed30dc35f 100644 --- a/src/diffusers/loaders/unet.py +++ b/src/diffusers/loaders/unet.py @@ -103,9 +103,9 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -149,7 +149,7 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) token = kwargs.pop("token", None) @@ -1090,9 +1090,9 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -1114,7 +1114,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs): raise ValueError("FromOriginalUNetMixin is currently only compatible with StableCascadeUNet") config = kwargs.pop("config", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) diff --git a/src/diffusers/models/modeling_flax_utils.py b/src/diffusers/models/modeling_flax_utils.py index 1ddcda9005fc..151281070faa 100644 --- a/src/diffusers/models/modeling_flax_utils.py +++ b/src/diffusers/models/modeling_flax_utils.py @@ -245,9 +245,9 @@ def from_pretrained( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -296,7 +296,7 @@ def from_pretrained( cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) from_pt = kwargs.pop("from_pt", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py index 8d9f2d9e71fc..a8518ca3ff7f 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -446,9 +446,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -530,7 +530,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) force_download = kwargs.pop("force_download", False) from_flax = kwargs.pop("from_flax", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", None) diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index c8b682e8afe4..5fb497ef2e22 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -234,9 +234,9 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -311,7 +311,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) @@ -507,9 +507,9 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -584,7 +584,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) @@ -783,9 +783,9 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -860,7 +860,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs): """ cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) local_files_only = kwargs.pop("local_files_only", False) diff --git a/src/diffusers/pipelines/pipeline_flax_utils.py b/src/diffusers/pipelines/pipeline_flax_utils.py index b1035c1f2f42..7534149b559a 100644 --- a/src/diffusers/pipelines/pipeline_flax_utils.py +++ b/src/diffusers/pipelines/pipeline_flax_utils.py @@ -254,9 +254,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. 
- resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -316,7 +316,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ``` """ cache_dir = kwargs.pop("cache_dir", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) diff --git a/src/diffusers/pipelines/pipeline_loading_utils.py b/src/diffusers/pipelines/pipeline_loading_utils.py index 15fb34e72d24..f7d9785043d1 100644 --- a/src/diffusers/pipelines/pipeline_loading_utils.py +++ b/src/diffusers/pipelines/pipeline_loading_utils.py @@ -435,7 +435,7 @@ def _load_empty_model( return_unused_kwargs=True, return_commit_hash=True, force_download=kwargs.pop("force_download", False), - resume_download=kwargs.pop("resume_download", False), + resume_download=kwargs.pop("resume_download", None), proxies=kwargs.pop("proxies", None), local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("token", None), @@ -454,7 +454,7 @@ def _load_empty_model( cached_folder, subfolder=name, force_download=kwargs.pop("force_download", False), - resume_download=kwargs.pop("resume_download", False), + resume_download=kwargs.pop("resume_download", None), proxies=kwargs.pop("proxies", None), local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("token", None), diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 59e38c910d4a..e5f822caa0ef 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -533,9 +533,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
@@ -625,7 +625,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ``` """ cache_dir = kwargs.pop("cache_dir", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) @@ -1216,9 +1216,9 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. @@ -1271,7 +1271,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: """ cache_dir = kwargs.pop("cache_dir", None) - resume_download = kwargs.pop("resume_download", False) + resume_download = kwargs.pop("resume_download", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) diff --git a/src/diffusers/schedulers/scheduling_utils.py b/src/diffusers/schedulers/scheduling_utils.py index 5dbdb82884bc..dcdce6c51f05 100644 --- a/src/diffusers/schedulers/scheduling_utils.py +++ b/src/diffusers/schedulers/scheduling_utils.py @@ -112,9 +112,9 @@ def from_pretrained( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to resume downloading the model weights and configuration files. If set to `False`, any - incompletely downloaded files are deleted. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. diff --git a/src/diffusers/schedulers/scheduling_utils_flax.py b/src/diffusers/schedulers/scheduling_utils_flax.py index a1d471f910e5..360ca4705e02 100644 --- a/src/diffusers/schedulers/scheduling_utils_flax.py +++ b/src/diffusers/schedulers/scheduling_utils_flax.py @@ -102,9 +102,9 @@ def from_pretrained( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received files. Will attempt to resume the download if such a - file exists. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. diff --git a/src/diffusers/utils/dynamic_modules_utils.py b/src/diffusers/utils/dynamic_modules_utils.py index add95812122c..8df418f3fdd4 100644 --- a/src/diffusers/utils/dynamic_modules_utils.py +++ b/src/diffusers/utils/dynamic_modules_utils.py @@ -201,7 +201,7 @@ def get_cached_module_file( module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, - resume_download: bool = False, + resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, @@ -228,9 +228,9 @@ def get_cached_module_file( cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they - exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + exist. resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 + of Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. @@ -380,7 +380,7 @@ def get_class_from_dynamic_module( class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, - resume_download: bool = False, + resume_download: Optional[bool] = None, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, @@ -417,8 +417,9 @@ def get_class_from_dynamic_module( force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. - resume_download (`bool`, *optional*, defaults to `False`): - Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + resume_download: + Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v1 of + Diffusers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. 
diff --git a/src/diffusers/utils/hub_utils.py b/src/diffusers/utils/hub_utils.py index d70ee53aaa41..83f02848fcf4 100644 --- a/src/diffusers/utils/hub_utils.py +++ b/src/diffusers/utils/hub_utils.py @@ -283,7 +283,7 @@ def _get_model_file( cache_dir: Optional[str] = None, force_download: bool = False, proxies: Optional[Dict] = None, - resume_download: bool = False, + resume_download: Optional[bool] = None, local_files_only: bool = False, token: Optional[str] = None, user_agent: Optional[Union[Dict, str]] = None, From 3e35628873e5a1723fdbb84a8789e99f243b4858 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Fri, 3 May 2024 15:09:15 +0530 Subject: [PATCH 10/14] Remove installing python again in container (#7852) update --- .github/workflows/push_tests.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index 163069be0912..00491e54b738 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -31,14 +31,13 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 2 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.8" - name: Install dependencies run: | python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH" python -m uv pip install -e [quality,test] + - name: Environment + run: | + python utils/print_env.py - name: Fetch Pipeline Matrix id: fetch_pipeline_matrix run: | From 58237364b1780223f48a80256f56408efe7b59a0 Mon Sep 17 00:00:00 2001 From: HelloWorldBeginner <294810125@qq.com> Date: Sat, 4 May 2024 02:14:34 +0800 Subject: [PATCH 11/14] Add Ascend NPU support for SDXL fine-tuning and fix the model saving bug when using DeepSpeed. (#7816) * Add Ascend NPU support for SDXL fine-tuning and fix the model saving bug when using DeepSpeed. * fix check code quality * Decouple the NPU flash attention and make it an independent module. * add doc and unit tests for npu flash attention. 
--------- Co-authored-by: mhh001 Co-authored-by: Sayak Paul --- docs/source/en/api/attnprocessor.md | 3 + examples/controlnet/train_controlnet_sdxl.py | 19 ++- .../train_text_to_image_lora_sdxl.py | 19 ++- src/diffusers/models/activations.py | 14 +- src/diffusers/models/attention_processor.py | 132 +++++++++++++++++- src/diffusers/models/modeling_utils.py | 30 ++++ tests/models/test_modeling_common.py | 56 +++++++- 7 files changed, 261 insertions(+), 12 deletions(-) diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index ab89d4d260f0..f586e9b08f2c 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -55,3 +55,6 @@ An attention processor is a class for applying different types of attention mech ## XFormersAttnProcessor [[autodoc]] models.attention_processor.XFormersAttnProcessor + +## AttnProcessorNPU +[[autodoc]] models.attention_processor.AttnProcessorNPU diff --git a/examples/controlnet/train_controlnet_sdxl.py b/examples/controlnet/train_controlnet_sdxl.py index 62192521a323..288a1e3fb612 100644 --- a/examples/controlnet/train_controlnet_sdxl.py +++ b/examples/controlnet/train_controlnet_sdxl.py @@ -32,7 +32,7 @@ import transformers from accelerate import Accelerator from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed +from accelerate.utils import DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version @@ -53,7 +53,7 @@ from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card -from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available from diffusers.utils.torch_utils import is_compiled_module @@ -64,6 +64,8 @@ check_min_version("0.28.0.dev0") logger = get_logger(__name__) +if is_torch_npu_available(): + torch.npu.config.allow_internal_format = False def log_validation(vae, unet, controlnet, args, accelerator, weight_dtype, step, is_final_validation=False): @@ -471,6 +473,9 @@ def parse_args(input_args=None): parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) + parser.add_argument( + "--enable_npu_flash_attention", action="store_true", help="Whether or not to use npu flash attention." + ) parser.add_argument( "--set_grads_to_none", action="store_true", @@ -936,6 +941,13 @@ def load_model_hook(models, input_dir): text_encoder_two.requires_grad_(False) controlnet.train() + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + unet.enable_npu_flash_attention() + else: + raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu devices.") + if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers @@ -1235,7 +1247,8 @@ def compute_embeddings(batch, proportion_empty_prompts, text_encoders, tokenizer progress_bar.update(1) global_step += 1 - if accelerator.is_main_process: + # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues. 
+ if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: diff --git a/examples/text_to_image/train_text_to_image_lora_sdxl.py b/examples/text_to_image/train_text_to_image_lora_sdxl.py index 0a6a70de2dc7..3604e755c62a 100644 --- a/examples/text_to_image/train_text_to_image_lora_sdxl.py +++ b/examples/text_to_image/train_text_to_image_lora_sdxl.py @@ -32,7 +32,7 @@ import transformers from accelerate import Accelerator from accelerate.logging import get_logger -from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed +from accelerate.utils import DistributedDataParallelKwargs, DistributedType, ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version @@ -60,7 +60,7 @@ is_wandb_available, ) from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card -from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.import_utils import is_torch_npu_available, is_xformers_available from diffusers.utils.torch_utils import is_compiled_module @@ -68,6 +68,8 @@ check_min_version("0.28.0.dev0") logger = get_logger(__name__) +if is_torch_npu_available(): + torch.npu.config.allow_internal_format = False def save_model_card( @@ -419,6 +421,9 @@ def parse_args(input_args=None): parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) + parser.add_argument( + "--enable_npu_flash_attention", action="store_true", help="Whether or not to use npu flash attention." + ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--rank", @@ -623,6 +628,13 @@ def main(args): text_encoder_one.to(accelerator.device, dtype=weight_dtype) text_encoder_two.to(accelerator.device, dtype=weight_dtype) + if args.enable_npu_flash_attention: + if is_torch_npu_available(): + logger.info("npu flash attention enabled.") + unet.enable_npu_flash_attention() + else: + raise ValueError("npu flash attention requires torch_npu extensions and is supported only on npu devices.") + if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers @@ -1149,7 +1161,8 @@ def compute_time_ids(original_size, crops_coords_top_left): accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 - if accelerator.is_main_process: + # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues. 
+ if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process: if global_step % args.checkpointing_steps == 0: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: diff --git a/src/diffusers/models/activations.py b/src/diffusers/models/activations.py index cec83bdded9e..f94b6c8d6d06 100644 --- a/src/diffusers/models/activations.py +++ b/src/diffusers/models/activations.py @@ -18,8 +18,12 @@ from torch import nn from ..utils import deprecate +from ..utils.import_utils import is_torch_npu_available +if is_torch_npu_available(): + import torch_npu + ACTIVATION_FUNCTIONS = { "swish": nn.SiLU(), "silu": nn.SiLU(), @@ -98,9 +102,13 @@ def forward(self, hidden_states, *args, **kwargs): if len(args) > 0 or kwargs.get("scale", None) is not None: deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." deprecate("scale", "1.0.0", deprecation_message) - - hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1) - return hidden_states * self.gelu(gate) + hidden_states = self.proj(hidden_states) + if is_torch_npu_available(): + # using torch_npu.npu_geglu can run faster and save memory on NPU. + return torch_npu.npu_geglu(hidden_states, dim=-1, approximate=1)[0] + else: + hidden_states, gate = hidden_states.chunk(2, dim=-1) + return hidden_states * self.gelu(gate) class ApproximateGELU(nn.Module): diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py index 429807989296..ea1c987e95c6 100644 --- a/src/diffusers/models/attention_processor.py +++ b/src/diffusers/models/attention_processor.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import inspect +import math from importlib import import_module from typing import Callable, List, Optional, Union @@ -21,13 +22,15 @@ from ..image_processor import IPAdapterMaskProcessor from ..utils import deprecate, logging -from ..utils.import_utils import is_xformers_available +from ..utils.import_utils import is_torch_npu_available, is_xformers_available from ..utils.torch_utils import maybe_allow_in_graph from .lora import LoRALinearLayer logger = logging.get_logger(__name__) # pylint: disable=invalid-name +if is_torch_npu_available(): + import torch_npu if is_xformers_available(): import xformers @@ -209,6 +212,23 @@ def __init__( ) self.set_processor(processor) + def set_use_npu_flash_attention(self, use_npu_flash_attention: bool) -> None: + r""" + Set whether to use npu flash attention from `torch_npu` or not. + + """ + if use_npu_flash_attention: + processor = AttnProcessorNPU() + else: + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + # but only if it has the default `scale` argument. 
TODO remove scale_qk check when we move to torch 2.1 + processor = ( + AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") and self.scale_qk else AttnProcessor() + ) + self.set_processor(processor) + def set_use_memory_efficient_attention_xformers( self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None ) -> None: @@ -1207,6 +1227,116 @@ def __call__( return hidden_states +class AttnProcessorNPU: + + r""" + Processor for implementing flash attention using torch_npu. Torch_npu supports only fp16 and bf16 data types. If + fp32 is used, F.scaled_dot_product_attention will be used for computation, but the acceleration effect on NPU is + not significant. + + """ + + def __init__(self): + if not is_torch_npu_available(): + raise ImportError("AttnProcessorNPU requires torch_npu extensions and is supported only on npu devices.") + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + ) + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + if query.dtype in (torch.float16, torch.bfloat16): + hidden_states = torch_npu.npu_fusion_attention( + query, + key, + value, + attn.heads, + input_layout="BNSD", + pse=None, + atten_mask=attention_mask, + scale=1.0 / math.sqrt(query.shape[-1]), + pre_tockens=65536, + next_tockens=65536, + keep_prob=1.0, + sync=False, + inner_precise=0, + )[0] + else: + # TODO: add support for attn.scale when we move to Torch 2.1 + 
hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query.dtype) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + class AttnProcessor2_0: r""" Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py index a8518ca3ff7f..373f5453aa23 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -272,6 +272,36 @@ def disable_gradient_checkpointing(self) -> None: if self._supports_gradient_checkpointing: self.apply(partial(self._set_gradient_checkpointing, value=False)) + def set_use_npu_flash_attention(self, valid: bool) -> None: + r""" + Set the switch for the npu flash attention. + """ + + def fn_recursive_set_npu_flash_attention(module: torch.nn.Module): + if hasattr(module, "set_use_npu_flash_attention"): + module.set_use_npu_flash_attention(valid) + + for child in module.children(): + fn_recursive_set_npu_flash_attention(child) + + for module in self.children(): + if isinstance(module, torch.nn.Module): + fn_recursive_set_npu_flash_attention(module) + + def enable_npu_flash_attention(self) -> None: + r""" + Enable npu flash attention from torch_npu + + """ + self.set_use_npu_flash_attention(True) + + def disable_npu_flash_attention(self) -> None: + r""" + disable npu flash attention from torch_npu + + """ + self.set_use_npu_flash_attention(False) + def set_use_memory_efficient_attention_xformers( self, valid: bool, attention_op: Optional[Callable] = None ) -> None: diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index d9e70c6dd784..59369b509876 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -30,9 +30,14 @@ from requests.exceptions import HTTPError from diffusers.models import UNet2DConditionModel -from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor +from diffusers.models.attention_processor import ( + AttnProcessor, + AttnProcessor2_0, + AttnProcessorNPU, + XFormersAttnProcessor, +) from diffusers.training_utils import EMAModel -from diffusers.utils import is_xformers_available, logging +from diffusers.utils import is_torch_npu_available, is_xformers_available, logging from diffusers.utils.testing_utils import ( CaptureLogger, get_python_version, @@ -300,6 +305,53 @@ def test_getattr_is_correct(self): assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'" + @unittest.skipIf( + torch_device != "npu" or not is_torch_npu_available(), + reason="torch npu flash attention is only available with NPU and `torch_npu` installed", + ) + def test_set_torch_npu_flash_attn_processor_determinism(self): + torch.use_deterministic_algorithms(False) + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + if not hasattr(model, "set_attn_processor"): + # If not has `set_attn_processor`, skip test + return + + model.set_default_attn_processor() + assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output = model(**self.inputs_dict(0))[0] + else: + output = model(**inputs_dict)[0] + + model.enable_npu_flash_attention() + assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_2 = model(**self.inputs_dict(0))[0] + else: + output_2 = model(**inputs_dict)[0] + + model.set_attn_processor(AttnProcessorNPU()) + assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_3 = model(**self.inputs_dict(0))[0] + else: + output_3 = model(**inputs_dict)[0] + + torch.use_deterministic_algorithms(True) + + assert torch.allclose(output, output_2, atol=self.base_precision) + assert torch.allclose(output, output_3, atol=self.base_precision) + assert torch.allclose(output_2, output_3, atol=self.base_precision) + @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", From 49b959b5408b97274e2ee423059d9239445aea26 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 3 May 2024 16:08:27 -0700 Subject: [PATCH 12/14] [docs] LCM (#7829) * lcm * lcm lora * fix * fix hfoption * edits --- docs/source/en/_toctree.yml | 6 +- .../en/using-diffusers/inference_with_lcm.md | 463 ++++++++++++++++-- .../inference_with_lcm_lora.md | 422 ---------------- 3 files changed, 412 insertions(+), 479 deletions(-) delete mode 100644 docs/source/en/using-diffusers/inference_with_lcm_lora.md diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index f2755798b792..89af55ed2a5b 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -81,16 +81,14 @@ title: ControlNet - local: using-diffusers/t2i_adapter title: T2I-Adapter + - local: using-diffusers/inference_with_lcm + title: Latent Consistency Model - local: using-diffusers/textual_inversion_inference title: Textual inversion - local: using-diffusers/shap-e title: Shap-E - local: using-diffusers/diffedit title: DiffEdit - - local: using-diffusers/inference_with_lcm_lora - title: Latent Consistency Model-LoRA - - local: using-diffusers/inference_with_lcm - title: Latent Consistency Model - local: using-diffusers/inference_with_tcd_lora title: Trajectory Consistency Distillation-LoRA - local: using-diffusers/svd diff --git a/docs/source/en/using-diffusers/inference_with_lcm.md b/docs/source/en/using-diffusers/inference_with_lcm.md index 798de67c6560..19fb349c5458 100644 --- a/docs/source/en/using-diffusers/inference_with_lcm.md +++ b/docs/source/en/using-diffusers/inference_with_lcm.md @@ -10,29 +10,30 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. 
--> +# Latent Consistency Model + [[open-in-colab]] -# Latent Consistency Model +[Latent Consistency Models (LCMs)](https://hf.co/papers/2310.04378) enable fast high-quality image generation by directly predicting the reverse diffusion process in the latent rather than pixel space. In other words, LCMs try to predict the noiseless image from the noisy image in contrast to typical diffusion models that iteratively remove noise from the noisy image. By avoiding the iterative sampling process, LCMs are able to generate high-quality images in 2-4 steps instead of 20-30 steps. -Latent Consistency Models (LCM) enable quality image generation in typically 2-4 steps making it possible to use diffusion models in almost real-time settings. +LCMs are distilled from pretrained models which requires ~32 hours of A100 compute. To speed this up, [LCM-LoRAs](https://hf.co/papers/2311.05556) train a [LoRA adapter](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) which have much fewer parameters to train compared to the full model. The LCM-LoRA can be plugged into a diffusion model once it has been trained. -From the [official website](https://latent-consistency-models.github.io/): +This guide will show you how to use LCMs and LCM-LoRAs for fast inference on tasks and how to use them with other adapters like ControlNet or T2I-Adapter. -> LCMs can be distilled from any pre-trained Stable Diffusion (SD) in only 4,000 training steps (~32 A100 GPU Hours) for generating high quality 768 x 768 resolution images in 2~4 steps or even one step, significantly accelerating text-to-image generation. We employ LCM to distill the Dreamshaper-V7 version of SD in just 4,000 training iterations. +> [!TIP] +> LCMs and LCM-LoRAs are available for Stable Diffusion v1.5, Stable Diffusion XL, and the SSD-1B model. You can find their checkpoints on the [Latent Consistency](https://hf.co/collections/latent-consistency/latent-consistency-models-weights-654ce61a95edd6dffccef6a8) Collections. -For a more technical overview of LCMs, refer to [the paper](https://huggingface.co/papers/2310.04378). +## Text-to-image -LCM distilled models are available for [stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and the [SSD-1B](https://huggingface.co/segmind/SSD-1B) model. All the checkpoints can be found in this [collection](https://huggingface.co/collections/latent-consistency/latent-consistency-models-weights-654ce61a95edd6dffccef6a8). + + -This guide shows how to perform inference with LCMs for -- text-to-image -- image-to-image -- combined with style LoRAs -- ControlNet/T2I-Adapter +To use LCMs, you need to load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the pipeline as usual, and pass a text prompt to generate an image in just 4 steps. -## Text-to-image +A couple of notes to keep in mind when using LCMs are: -You'll use the [`StableDiffusionXLPipeline`] pipeline with the [`LCMScheduler`] and then load the LCM-LoRA. Together with the LCM-LoRA and the scheduler, the pipeline enables a fast inference workflow, overcoming the slow iterative nature of diffusion models. +* Typically, batch size is doubled inside the pipeline for classifier-free guidance. But LCM applies guidance with guidance embeddings and doesn't need to double the batch size, which leads to faster inference. 
The downside is that negative prompts don't work with LCM because they don't have any effect on the denoising process. +* The ideal range for `guidance_scale` is [3., 13.] because that is what the UNet was trained with. However, disabling `guidance_scale` with a value of 1.0 is also effective in most cases. ```python from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler @@ -49,31 +50,69 @@ pipe = StableDiffusionXLPipeline.from_pretrained( pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" - generator = torch.manual_seed(0) image = pipe( prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0 ).images[0] +image ``` -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdxl_t2i.png) +
-Notice that we use only 4 steps for generation which is way less than what's typically used for standard SDXL.
+ + +To use LCM-LoRAs, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt to generate an image in just 4 steps. + +A couple of notes to keep in mind when using LCM-LoRAs are: + +* Typically, batch size is doubled inside the pipeline for classifier-free guidance. But LCM applies guidance with guidance embeddings and doesn't need to double the batch size, which leads to faster inference. The downside is that negative prompts don't work with LCM because they don't have any effect on the denoising process. +* You could use guidance with LCM-LoRAs, but it is very sensitive to high `guidance_scale` values and can lead to artifacts in the generated image. The best values we've found are between [1.0, 2.0]. +* Replace [stabilityai/stable-diffusion-xl-base-1.0](https://hf.co/stabilityai/stable-diffusion-xl-base-1.0) with any finetuned model. For example, try using the [animagine-xl](https://huggingface.co/Linaqruf/animagine-xl) checkpoint to generate anime images with SDXL. + +```py +import torch +from diffusers import DiffusionPipeline, LCMScheduler + +pipe = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + variant="fp16", + torch_dtype=torch.float16 +).to("cuda") +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) +pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") -Some details to keep in mind: +prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" +generator = torch.manual_seed(42) +image = pipe( + prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0 +).images[0] +image +``` -* To perform classifier-free guidance, batch size is usually doubled inside the pipeline. LCM, however, applies guidance using guidance embeddings, so the batch size does not have to be doubled in this case. This leads to a faster inference time, with the drawback that negative prompts don't have any effect on the denoising process. -* The UNet was trained using the [3., 13.] guidance scale range. So, that is the ideal range for `guidance_scale`. However, disabling `guidance_scale` using a value of 1.0 is also effective in most cases. +
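The note above about swapping in a finetuned base model has no accompanying snippet in the new guide. As a minimal sketch, here is the fine-tuned model example adapted from the removed `inference_with_lcm_lora.md` guide, using the [animagine-xl](https://huggingface.co/Linaqruf/animagine-xl) checkpoint:

```py
import torch
from diffusers import DiffusionPipeline, LCMScheduler

# load a finetuned SDXL checkpoint instead of the base model
pipe = DiffusionPipeline.from_pretrained(
    "Linaqruf/animagine-xl",
    variant="fp16",
    torch_dtype=torch.float16
).to("cuda")

# swap in the LCM scheduler and load the LCM-LoRA weights
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")

prompt = "face focus, cute, masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck"

# guidance_scale=1.0 disables classifier-free guidance, as recommended for LCM-LoRA
generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0
).images[0]
image
```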
## Image-to-image -LCMs can be applied to image-to-image tasks too. For this example, we'll use the [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) model, but the same steps can be applied to other LCM models as well. + + + +To use LCMs for image-to-image, you need to load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the pipeline as usual, and pass a text prompt and initial image to generate an image in just 4 steps. + +> [!TIP] +> Experiment with different values for `num_inference_steps`, `strength`, and `guidance_scale` to get the best results. ```python import torch from diffusers import AutoPipelineForImage2Image, UNet2DConditionModel, LCMScheduler -from diffusers.utils import make_image_grid, load_image +from diffusers.utils import load_image unet = UNet2DConditionModel.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", @@ -89,12 +128,8 @@ pipe = AutoPipelineForImage2Image.from_pretrained( ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) -# prepare image -url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -init_image = load_image(url) +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png") prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k" - -# pass prompt and image to pipeline generator = torch.manual_seed(0) image = pipe( prompt, @@ -104,22 +139,130 @@ image = pipe( strength=0.5, generator=generator ).images[0] -make_image_grid([init_image, image], rows=1, cols=2) +image ``` -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdv1-5_i2i.png) +
+[figure: "initial image" vs. "generated image" comparison]
+ +To use LCM-LoRAs for image-to-image, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt and initial image to generate an image in just 4 steps. - +> [!TIP] +> Experiment with different values for `num_inference_steps`, `strength`, and `guidance_scale` to get the best results. -You can get different results based on your prompt and the image you provide. To get the best results, we recommend trying different values for `num_inference_steps`, `strength`, and `guidance_scale` parameters and choose the best one. +```py +import torch +from diffusers import AutoPipelineForImage2Image, LCMScheduler +from diffusers.utils import make_image_grid, load_image + +pipe = AutoPipelineForImage2Image.from_pretrained( + "Lykon/dreamshaper-7", + torch_dtype=torch.float16, + variant="fp16", +).to("cuda") + +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") + +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png") +prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k" + +generator = torch.manual_seed(0) +image = pipe( + prompt, + image=init_image, + num_inference_steps=4, + guidance_scale=1, + strength=0.6, + generator=generator +).images[0] +image +``` - +
+[figure: "initial image" vs. "generated image" comparison]
-## Combine with style LoRAs +## Inpainting -LCMs can be used with other styled LoRAs to generate styled-images in very few steps (4-8). In the following example, we'll use the [papercut LoRA](TheLastBen/Papercut_SDXL). +To use LCM-LoRAs for inpainting, you need to replace the scheduler with the [`LCMScheduler`] and load the LCM-LoRA weights with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method. Then you can use the pipeline as usual, and pass a text prompt, initial image, and mask image to generate an image in just 4 steps. + +```py +import torch +from diffusers import AutoPipelineForInpainting, LCMScheduler +from diffusers.utils import load_image, make_image_grid + +pipe = AutoPipelineForInpainting.from_pretrained( + "runwayml/stable-diffusion-inpainting", + torch_dtype=torch.float16, + variant="fp16", +).to("cuda") + +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") + +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") + +prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" +generator = torch.manual_seed(0) +image = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + generator=generator, + num_inference_steps=4, + guidance_scale=4, +).images[0] +image +``` + +
+[figure: "initial image" vs. "generated image" comparison]
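The inpainting snippet above imports `make_image_grid` but only returns the generated image. If you want the side-by-side view the previous guide produced, a short follow-up (assuming `init_image`, `mask_image`, and `image` from the snippet above are still in scope) is:

```py
from diffusers.utils import make_image_grid

# lay out the input image, the mask, and the inpainted result in one row
make_image_grid([init_image, mask_image, image], rows=1, cols=3)
```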
+ +## Adapters + +LCMs are compatible with adapters like LoRA, ControlNet, T2I-Adapter, and AnimateDiff. You can bring the speed of LCMs to these adapters to generate images in a certain style or condition the model on another input like a canny image. + +### LoRA + +[LoRA](../using-diffusers/loading_adapters#lora) adapters can be rapidly finetuned to learn a new style from just a few images and plugged into a pretrained model to generate images in that style. + + + + +Load the LCM checkpoint for your supported model into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Then you can use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LoRA weights into the LCM and generate a styled image in a few steps. ```python from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler @@ -134,11 +277,9 @@ pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut") prompt = "papercut, a cute fox" - generator = torch.manual_seed(0) image = pipe( prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0 @@ -146,15 +287,58 @@ image = pipe( image ``` -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdx_lora_mix.png) +
+ -## ControlNet/T2I-Adapter +Replace the scheduler with the [`LCMScheduler`]. Then you can use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights and the style LoRA you want to use. Combine both LoRA adapters with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method and generate a styled image in a few steps. -Let's look at how we can perform inference with ControlNet/T2I-Adapter and a LCM. +```py +import torch +from diffusers import DiffusionPipeline, LCMScheduler + +pipe = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + variant="fp16", + torch_dtype=torch.float16 +).to("cuda") + +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm") +pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut") + +pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8]) + +prompt = "papercut, a cute fox" +generator = torch.manual_seed(0) +image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0] +image +``` + +
### ControlNet -For this example, we'll use the [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) model with canny ControlNet, but the same steps can be applied to other LCM models as well. + +[ControlNet](./controlnet) are adapters that can be trained on a variety of inputs like canny edge, pose estimation, or depth. The ControlNet can be inserted into the pipeline to provide additional conditioning and control to the model for more accurate generation. + +You can find additional ControlNet models trained on other inputs in [lllyasviel's](https://hf.co/lllyasviel) repository. + + + + +Load a ControlNet model trained on canny images and pass it to the [`ControlNetModel`]. Then you can load a LCM model into [`StableDiffusionControlNetPipeline`] and replace the scheduler with the [`LCMScheduler`]. Now pass the canny image to the pipeline and generate an image. + +> [!TIP] +> Experiment with different values for `num_inference_steps`, `controlnet_conditioning_scale`, `cross_attention_kwargs`, and `guidance_scale` to get the best results. ```python import torch @@ -186,8 +370,6 @@ pipe = StableDiffusionControlNetPipeline.from_pretrained( torch_dtype=torch.float16, safety_checker=None, ).to("cuda") - -# set scheduler pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) generator = torch.manual_seed(0) @@ -200,16 +382,84 @@ image = pipe( make_image_grid([canny_image, image], rows=1, cols=2) ``` -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdv1-5_controlnet.png) +
+ - -The inference parameters in this example might not work for all examples, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the best one. - +Load a ControlNet model trained on canny images and pass it to the [`ControlNetModel`]. Then you can load a Stable Diffusion v1.5 model into [`StableDiffusionControlNetPipeline`] and replace the scheduler with the [`LCMScheduler`]. Use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights, and pass the canny image to the pipeline and generate an image. + +> [!TIP] +> Experiment with different values for `num_inference_steps`, `controlnet_conditioning_scale`, `cross_attention_kwargs`, and `guidance_scale` to get the best results. + +```py +import torch +import cv2 +import numpy as np +from PIL import Image + +from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler +from diffusers.utils import load_image + +image = load_image( + "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" +).resize((512, 512)) + +image = np.array(image) + +low_threshold = 100 +high_threshold = 200 + +image = cv2.Canny(image, low_threshold, high_threshold) +image = image[:, :, None] +image = np.concatenate([image, image, image], axis=2) +canny_image = Image.fromarray(image) + +controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) +pipe = StableDiffusionControlNetPipeline.from_pretrained( + "runwayml/stable-diffusion-v1-5", + controlnet=controlnet, + torch_dtype=torch.float16, + safety_checker=None, + variant="fp16" +).to("cuda") + +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") + +generator = torch.manual_seed(0) +image = pipe( + "the mona lisa", + image=canny_image, + num_inference_steps=4, + guidance_scale=1.5, + controlnet_conditioning_scale=0.8, + cross_attention_kwargs={"scale": 1}, + generator=generator, +).images[0] +image +``` + +
### T2I-Adapter -This example shows how to use the `lcm-sdxl` with the [Canny T2I-Adapter](TencentARC/t2i-adapter-canny-sdxl-1.0). +[T2I-Adapter](./t2i_adapter) is an even more lightweight adapter than ControlNet, that provides an additional input to condition a pretrained model with. It is faster than ControlNet but the results may be slightly worse. + +You can find additional T2I-Adapter checkpoints trained on other inputs in [TencentArc's](https://hf.co/TencentARC) repository. + + + + +Load a T2IAdapter trained on canny images and pass it to the [`StableDiffusionXLAdapterPipeline`]. Then load a LCM checkpoint into [`UNet2DConditionModel`] and replace the scheduler with the [`LCMScheduler`]. Now pass the canny image to the pipeline and generate an image. ```python import torch @@ -220,10 +470,9 @@ from PIL import Image from diffusers import StableDiffusionXLAdapterPipeline, UNet2DConditionModel, T2IAdapter, LCMScheduler from diffusers.utils import load_image, make_image_grid -# Prepare image -# Detect the canny map in low resolution to avoid high-frequency details +# detect the canny map in low resolution to avoid high-frequency details image = load_image( - "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg" + "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ).resize((384, 384)) image = np.array(image) @@ -236,7 +485,6 @@ image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image).resize((1024, 1216)) -# load adapter adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, varient="fp16").to("cuda") unet = UNet2DConditionModel.from_pretrained( @@ -254,7 +502,7 @@ pipe = StableDiffusionXLAdapterPipeline.from_pretrained( pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) -prompt = "Mystical fairy in real, magic, 4k picture, high quality" +prompt = "the mona lisa, 4k picture, high quality" negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured" generator = torch.manual_seed(0) @@ -268,7 +516,116 @@ image = pipe( adapter_conditioning_factor=1, generator=generator, ).images[0] -grid = make_image_grid([canny_image, image], rows=1, cols=2) ``` -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdxl_t2iadapter.png) +
+ + +Load a T2IAdapter trained on canny images and pass it to the [`StableDiffusionXLAdapterPipeline`]. Replace the scheduler with the [`LCMScheduler`], and use the [`~loaders.LoraLoaderMixin.load_lora_weights`] method to load the LCM-LoRA weights. Pass the canny image to the pipeline and generate an image. + +```py +import torch +import cv2 +import numpy as np +from PIL import Image + +from diffusers import StableDiffusionXLAdapterPipeline, UNet2DConditionModel, T2IAdapter, LCMScheduler +from diffusers.utils import load_image, make_image_grid + +# detect the canny map in low resolution to avoid high-frequency details +image = load_image( + "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" +).resize((384, 384)) + +image = np.array(image) + +low_threshold = 100 +high_threshold = 200 + +image = cv2.Canny(image, low_threshold, high_threshold) +image = image[:, :, None] +image = np.concatenate([image, image, image], axis=2) +canny_image = Image.fromarray(image).resize((1024, 1024)) + +adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, varient="fp16").to("cuda") + +pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + adapter=adapter, + torch_dtype=torch.float16, + variant="fp16", +).to("cuda") + +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") + +prompt = "the mona lisa, 4k picture, high quality" +negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured" + +generator = torch.manual_seed(0) +image = pipe( + prompt=prompt, + negative_prompt=negative_prompt, + image=canny_image, + num_inference_steps=4, + guidance_scale=1.5, + adapter_conditioning_scale=0.8, + adapter_conditioning_factor=1, + generator=generator, +).images[0] +``` + +
+ +### AnimateDiff + +[AnimateDiff](../api/pipelines/animatediff) is an adapter that adds motion to an image. It can be used with most Stable Diffusion models, effectively turning them into "video generation" models. Generating good results with a video model usually requires generating multiple frames (16-24), which can be very slow with a regular Stable Diffusion model. LCM-LoRA can speed up this process by only taking 4-8 steps for each frame. + +Load a [`AnimateDiffPipeline`] and pass a [`MotionAdapter`] to it. Then replace the scheduler with the [`LCMScheduler`], and combine both LoRA adapters with the [`~loaders.UNet2DConditionLoadersMixin.set_adapters`] method. Now you can pass a prompt to the pipeline and generate an animated image. + +```py +import torch +from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler, LCMScheduler +from diffusers.utils import export_to_gif + +adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5") +pipe = AnimateDiffPipeline.from_pretrained( + "frankjoshua/toonyou_beta6", + motion_adapter=adapter, +).to("cuda") + +# set scheduler +pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + +# load LCM-LoRA +pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm") +pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora") + +pipe.set_adapters(["lcm", "motion-lora"], adapter_weights=[0.55, 1.2]) + +prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress" +generator = torch.manual_seed(0) +frames = pipe( + prompt=prompt, + num_inference_steps=5, + guidance_scale=1.25, + cross_attention_kwargs={"scale": 1}, + num_frames=24, + generator=generator +).frames[0] +export_to_gif(frames, "animation.gif") +``` + +
diff --git a/docs/source/en/using-diffusers/inference_with_lcm_lora.md b/docs/source/en/using-diffusers/inference_with_lcm_lora.md deleted file mode 100644 index 36120a04828c..000000000000 --- a/docs/source/en/using-diffusers/inference_with_lcm_lora.md +++ /dev/null @@ -1,422 +0,0 @@ - - -[[open-in-colab]] - -# Performing inference with LCM-LoRA - -Latent Consistency Models (LCM) enable quality image generation in typically 2-4 steps making it possible to use diffusion models in almost real-time settings. - -From the [official website](https://latent-consistency-models.github.io/): - -> LCMs can be distilled from any pre-trained Stable Diffusion (SD) in only 4,000 training steps (~32 A100 GPU Hours) for generating high quality 768 x 768 resolution images in 2~4 steps or even one step, significantly accelerating text-to-image generation. We employ LCM to distill the Dreamshaper-V7 version of SD in just 4,000 training iterations. - -For a more technical overview of LCMs, refer to [the paper](https://huggingface.co/papers/2310.04378). - -However, each model needs to be distilled separately for latent consistency distillation. The core idea with LCM-LoRA is to train just a few adapter layers, the adapter being LoRA in this case. -This way, we don't have to train the full model and keep the number of trainable parameters manageable. The resulting LoRAs can then be applied to any fine-tuned version of the model without distilling them separately. -Additionally, the LoRAs can be applied to image-to-image, ControlNet/T2I-Adapter, inpainting, AnimateDiff etc. -The LCM-LoRA can also be combined with other LoRAs to generate styled images in very few steps (4-8). - -LCM-LoRAs are available for [stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and the [SSD-1B](https://huggingface.co/segmind/SSD-1B) model. All the checkpoints can be found in this [collection](https://huggingface.co/collections/latent-consistency/latent-consistency-models-loras-654cdd24e111e16f0865fba6). - -For more details about LCM-LoRA, refer to [the technical report](https://huggingface.co/papers/2311.05556). - -This guide shows how to perform inference with LCM-LoRAs for -- text-to-image -- image-to-image -- combined with styled LoRAs -- ControlNet/T2I-Adapter -- inpainting -- AnimateDiff - -Before going through this guide, we'll take a look at the general workflow for performing inference with LCM-LoRAs. -LCM-LoRAs are similar to other Stable Diffusion LoRAs so they can be used with any [`DiffusionPipeline`] that supports LoRAs. - -- Load the task specific pipeline and model. -- Set the scheduler to [`LCMScheduler`]. -- Load the LCM-LoRA weights for the model. -- Reduce the `guidance_scale` between `[1.0, 2.0]` and set the `num_inference_steps` between [4, 8]. -- Perform inference with the pipeline with the usual parameters. - -Let's look at how we can perform inference with LCM-LoRAs for different tasks. - -First, make sure you have [peft](https://github.com/huggingface/peft) installed, for better LoRA support. - -```bash -pip install -U peft -``` - -## Text-to-image - -You'll use the [`StableDiffusionXLPipeline`] with the scheduler: [`LCMScheduler`] and then load the LCM-LoRA. Together with the LCM-LoRA and the scheduler, the pipeline enables a fast inference workflow overcoming the slow iterative nature of diffusion models. 
- -```python -import torch -from diffusers import DiffusionPipeline, LCMScheduler - -pipe = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - variant="fp16", - torch_dtype=torch.float16 -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") - -prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k" - -generator = torch.manual_seed(42) -image = pipe( - prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0 -).images[0] -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2i.png) - -Notice that we use only 4 steps for generation which is way less than what's typically used for standard SDXL. - - - -You may have noticed that we set `guidance_scale=1.0`, which disables classifer-free-guidance. This is because the LCM-LoRA is trained with guidance, so the batch size does not have to be doubled in this case. This leads to a faster inference time, with the drawback that negative prompts don't have any effect on the denoising process. - -You can also use guidance with LCM-LoRA, but due to the nature of training the model is very sensitve to the `guidance_scale` values, high values can lead to artifacts in the generated images. In our experiments, we found that the best values are in the range of [1.0, 2.0]. - - - -### Inference with a fine-tuned model - -As mentioned above, the LCM-LoRA can be applied to any fine-tuned version of the model without having to distill them separately. Let's look at how we can perform inference with a fine-tuned model. In this example, we'll use the [animagine-xl](https://huggingface.co/Linaqruf/animagine-xl) model, which is a fine-tuned version of the SDXL model for generating anime. - -```python -from diffusers import DiffusionPipeline, LCMScheduler - -pipe = DiffusionPipeline.from_pretrained( - "Linaqruf/animagine-xl", - variant="fp16", - torch_dtype=torch.float16 -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") - -prompt = "face focus, cute, masterpiece, best quality, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck" - -generator = torch.manual_seed(0) -image = pipe( - prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=1.0 -).images[0] -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2i_finetuned.png) - - -## Image-to-image - -LCM-LoRA can be applied to image-to-image tasks too. Let's look at how we can perform image-to-image generation with LCMs. For this example we'll use the [dreamshaper-7](https://huggingface.co/Lykon/dreamshaper-7) model and the LCM-LoRA for `stable-diffusion-v1-5 `. 
- -```python -import torch -from diffusers import AutoPipelineForImage2Image, LCMScheduler -from diffusers.utils import make_image_grid, load_image - -pipe = AutoPipelineForImage2Image.from_pretrained( - "Lykon/dreamshaper-7", - torch_dtype=torch.float16, - variant="fp16", -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") - -# prepare image -url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -init_image = load_image(url) -prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k" - -# pass prompt and image to pipeline -generator = torch.manual_seed(0) -image = pipe( - prompt, - image=init_image, - num_inference_steps=4, - guidance_scale=1, - strength=0.6, - generator=generator -).images[0] -make_image_grid([init_image, image], rows=1, cols=2) -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_i2i.png) - - - - -You can get different results based on your prompt and the image you provide. To get the best results, we recommend trying different values for `num_inference_steps`, `strength`, and `guidance_scale` parameters and choose the best one. - - - - -## Combine with styled LoRAs - -LCM-LoRA can be combined with other LoRAs to generate styled-images in very few steps (4-8). In the following example, we'll use the LCM-LoRA with the [papercut LoRA](TheLastBen/Papercut_SDXL). -To learn more about how to combine LoRAs, refer to [this guide](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference#combine-multiple-adapters). - -```python -import torch -from diffusers import DiffusionPipeline, LCMScheduler - -pipe = DiffusionPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - variant="fp16", - torch_dtype=torch.float16 -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LoRAs -pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm") -pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut") - -# Combine LoRAs -pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8]) - -prompt = "papercut, a cute fox" -generator = torch.manual_seed(0) -image = pipe(prompt, num_inference_steps=4, guidance_scale=1, generator=generator).images[0] -image -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdx_lora_mix.png) - - -## ControlNet/T2I-Adapter - -Let's look at how we can perform inference with ControlNet/T2I-Adapter and LCM-LoRA. - -### ControlNet -For this example, we'll use the SD-v1-5 model and the LCM-LoRA for SD-v1-5 with canny ControlNet. 
- -```python -import torch -import cv2 -import numpy as np -from PIL import Image - -from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler -from diffusers.utils import load_image - -image = load_image( - "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" -).resize((512, 512)) - -image = np.array(image) - -low_threshold = 100 -high_threshold = 200 - -image = cv2.Canny(image, low_threshold, high_threshold) -image = image[:, :, None] -image = np.concatenate([image, image, image], axis=2) -canny_image = Image.fromarray(image) - -controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) -pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", - controlnet=controlnet, - torch_dtype=torch.float16, - safety_checker=None, - variant="fp16" -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") - -generator = torch.manual_seed(0) -image = pipe( - "the mona lisa", - image=canny_image, - num_inference_steps=4, - guidance_scale=1.5, - controlnet_conditioning_scale=0.8, - cross_attention_kwargs={"scale": 1}, - generator=generator, -).images[0] -make_image_grid([canny_image, image], rows=1, cols=2) -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_controlnet.png) - - - -The inference parameters in this example might not work for all examples, so we recommend you to try different values for `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale` and `cross_attention_kwargs` parameters and choose the best one. - - -### T2I-Adapter - -This example shows how to use the LCM-LoRA with the [Canny T2I-Adapter](TencentARC/t2i-adapter-canny-sdxl-1.0) and SDXL. 
- -```python -import torch -import cv2 -import numpy as np -from PIL import Image - -from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, LCMScheduler -from diffusers.utils import load_image, make_image_grid - -# Prepare image -# Detect the canny map in low resolution to avoid high-frequency details -image = load_image( - "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg" -).resize((384, 384)) - -image = np.array(image) - -low_threshold = 100 -high_threshold = 200 - -image = cv2.Canny(image, low_threshold, high_threshold) -image = image[:, :, None] -image = np.concatenate([image, image, image], axis=2) -canny_image = Image.fromarray(image).resize((1024, 1024)) - -# load adapter -adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, varient="fp16").to("cuda") - -pipe = StableDiffusionXLAdapterPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-base-1.0", - adapter=adapter, - torch_dtype=torch.float16, - variant="fp16", -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl") - -prompt = "Mystical fairy in real, magic, 4k picture, high quality" -negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured" - -generator = torch.manual_seed(0) -image = pipe( - prompt=prompt, - negative_prompt=negative_prompt, - image=canny_image, - num_inference_steps=4, - guidance_scale=1.5, - adapter_conditioning_scale=0.8, - adapter_conditioning_factor=1, - generator=generator, -).images[0] -make_image_grid([canny_image, image], rows=1, cols=2) -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdxl_t2iadapter.png) - - -## Inpainting - -LCM-LoRA can be used for inpainting as well. - -```python -import torch -from diffusers import AutoPipelineForInpainting, LCMScheduler -from diffusers.utils import load_image, make_image_grid - -pipe = AutoPipelineForInpainting.from_pretrained( - "runwayml/stable-diffusion-inpainting", - torch_dtype=torch.float16, - variant="fp16", -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") - -# load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") - -# generator = torch.Generator("cuda").manual_seed(92) -prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" -generator = torch.manual_seed(0) -image = pipe( - prompt=prompt, - image=init_image, - mask_image=mask_image, - generator=generator, - num_inference_steps=4, - guidance_scale=4, -).images[0] -make_image_grid([init_image, mask_image, image], rows=1, cols=3) -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_inpainting.png) - - -## AnimateDiff - -[`AnimateDiff`] allows you to animate images using Stable Diffusion models. To get good results, we need to generate multiple frames (16-24), and doing this with standard SD models can be very slow. 
-LCM-LoRA can be used to speed up the process significantly, as you just need to do 4-8 steps for each frame. Let's look at how we can perform animation with LCM-LoRA and AnimateDiff. - -```python -import torch -from diffusers import MotionAdapter, AnimateDiffPipeline, DDIMScheduler, LCMScheduler -from diffusers.utils import export_to_gif - -adapter = MotionAdapter.from_pretrained("diffusers/animatediff-motion-adapter-v1-5") -pipe = AnimateDiffPipeline.from_pretrained( - "frankjoshua/toonyou_beta6", - motion_adapter=adapter, -).to("cuda") - -# set scheduler -pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) - -# load LCM-LoRA -pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5", adapter_name="lcm") -pipe.load_lora_weights("guoyww/animatediff-motion-lora-zoom-in", weight_name="diffusion_pytorch_model.safetensors", adapter_name="motion-lora") - -pipe.set_adapters(["lcm", "motion-lora"], adapter_weights=[0.55, 1.2]) - -prompt = "best quality, masterpiece, 1girl, looking at viewer, blurry background, upper body, contemporary, dress" -generator = torch.manual_seed(0) -frames = pipe( - prompt=prompt, - num_inference_steps=5, - guidance_scale=1.25, - cross_attention_kwargs={"scale": 1}, - num_frames=24, - generator=generator -).frames[0] -export_to_gif(frames, "animation.gif") -``` - -![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_sdv1-5_animatediff.gif) \ No newline at end of file From 7fa3e5b0f6a593d06e65f5b40ccd46acfafcfeb1 Mon Sep 17 00:00:00 2001 From: Guillaume LEGENDRE Date: Mon, 6 May 2024 14:25:24 +0200 Subject: [PATCH 13/14] Ci - change cache folder (#7867) --- .github/workflows/push_tests.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/push_tests.yml b/.github/workflows/push_tests.yml index 00491e54b738..d071af2b0be2 100644 --- a/.github/workflows/push_tests.yml +++ b/.github/workflows/push_tests.yml @@ -61,7 +61,7 @@ jobs: runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 --privileged + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged steps: - name: Checkout diffusers uses: actions/checkout@v3 @@ -118,7 +118,7 @@ jobs: runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 defaults: run: shell: bash @@ -170,7 +170,7 @@ jobs: runs-on: [single-gpu, nvidia-gpu, t4, ci] container: image: diffusers/diffusers-pytorch-cuda - options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0 + options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 defaults: run: shell: bash From 0d23645bd120e6785a3d81ee8e053afe84bbc77f Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 6 May 2024 15:07:25 -0700 Subject: [PATCH 14/14] [docs] Distilled inference (#7834) * combine * edits --- docs/source/en/_toctree.yml | 2 - docs/source/en/optimization/fp16.md | 107 ++++++++++---- .../source/en/using-diffusers/distilled_sd.md | 133 ------------------ 3 files changed, 80 insertions(+), 162 deletions(-) delete mode 100644 
docs/source/en/using-diffusers/distilled_sd.md diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 89af55ed2a5b..1c21d4cd9f74 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -139,8 +139,6 @@ - sections: - local: optimization/fp16 title: Speed up inference - - local: using-diffusers/distilled_sd - title: Distilled Stable Diffusion inference - local: optimization/memory title: Reduce memory usage - local: optimization/torch2.0 diff --git a/docs/source/en/optimization/fp16.md b/docs/source/en/optimization/fp16.md index 7a2cf934985c..b21b61368826 100644 --- a/docs/source/en/optimization/fp16.md +++ b/docs/source/en/optimization/fp16.md @@ -12,27 +12,23 @@ specific language governing permissions and limitations under the License. # Speed up inference -There are several ways to optimize 🤗 Diffusers for inference speed. As a general rule of thumb, we recommend using either [xFormers](xformers) or `torch.nn.functional.scaled_dot_product_attention` in PyTorch 2.0 for their memory-efficient attention. +There are several ways to optimize Diffusers for inference speed, such as reducing the computational burden by lowering the data precision or using a lightweight distilled model. There are also memory-efficient attention implementations, [xFormers](xformers) and [scaled dot product attetntion](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) in PyTorch 2.0, that reduce memory usage which also indirectly speeds up inference. Different speed optimizations can be stacked together to get the fastest inference times. - +> [!TIP] +> Optimizing for inference speed or reduced memory usage can lead to improved performance in the other category, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about lowering memory usage in the [Reduce memory usage](memory) guide. -In many cases, optimizing for speed or memory leads to improved performance in the other, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about preserving memory in the [Reduce memory usage](memory) guide. +The inference times below are obtained from generating a single 512x512 image from the prompt "a photo of an astronaut riding a horse on mars" with 50 DDIM steps on a NVIDIA A100. - +| setup | latency | speed-up | +|----------|---------|----------| +| baseline | 5.27s | x1 | +| tf32 | 4.14s | x1.27 | +| fp16 | 3.51s | x1.50 | +| combined | 3.41s | x1.54 | -The results below are obtained from generating a single 512x512 image from the prompt `a photo of an astronaut riding a horse on mars` with 50 DDIM steps on a Nvidia Titan RTX, demonstrating the speed-up you can expect. +## TensorFloat-32 -| | latency | speed-up | -| ---------------- | ------- | ------- | -| original | 9.50s | x1 | -| fp16 | 3.61s | x2.63 | -| channels last | 3.30s | x2.88 | -| traced UNet | 3.21s | x2.96 | -| memory efficient attention | 2.63s | x3.61 | - -## Use TensorFloat-32 - -On Ampere and later CUDA devices, matrix multiplications and convolutions can use the [TensorFloat-32 (TF32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode for faster, but slightly less accurate computations. By default, PyTorch enables TF32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling TF32 for matrix multiplications. 
+On Ampere and later CUDA devices, matrix multiplications and convolutions can use the [TensorFloat-32 (tf32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode for faster, but slightly less accurate computations. By default, PyTorch enables tf32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling tf32 for matrix multiplications. It can significantly speed up computations with typically negligible loss in numerical accuracy.
 
 ```python
 import torch
 
 torch.backends.cuda.matmul.allow_tf32 = True
 ```
 
-You can learn more about TF32 in the [Mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32) guide.
+Learn more about tf32 in the [Mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32) guide.
 
 ## Half-precision weights
 
-To save GPU memory and get more speed, try loading and running the model weights directly in half-precision or float16:
+To save GPU memory and get more speed, set `torch_dtype=torch.float16` to load and run the model weights directly with half-precision weights.
 
 ```Python
 import torch
 from diffusers import DiffusionPipeline
 
 pipe = DiffusionPipeline.from_pretrained(
     "runwayml/stable-diffusion-v1-5",
     torch_dtype=torch.float16,
     use_safetensors=True,
 )
 pipe = pipe.to("cuda")
-
-prompt = "a photo of an astronaut riding a horse on mars"
-image = pipe(prompt).images[0]
 ```
 
-
+> [!WARNING]
+> Don't use [torch.autocast](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.
 
-Don't use [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.
-
-## Distilled model
+## Distilled model
 
-You could also use a distilled Stable Diffusion model and autoencoder to speed up inference. During distillation, many of the UNet's residual and attention blocks are shed to reduce the model size. The distilled model is faster and uses less memory while generating images of comparable quality to the full Stable Diffusion model.
+You could also use a distilled Stable Diffusion model and autoencoder to speed up inference. During distillation, many of the UNet's residual and attention blocks are shed to reduce the model size by 51% and improve latency on CPU/GPU by 43%. The distilled model is faster and uses less memory while generating images of comparable quality to the full Stable Diffusion model.
 
+> [!TIP]
+> Read the [Open-sourcing Knowledge Distillation Code and Weights of SD-Small and SD-Tiny](https://huggingface.co/blog/sd_distillation) blog post to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.
 
+The inference times below are obtained from generating 4 images from the prompt "a photo of an astronaut riding a horse on mars" with 25 PNDM steps on an NVIDIA A100. Each generation is repeated 3 times with the distilled Stable Diffusion v1.4 model by [Nota AI](https://hf.co/nota-ai).
+
+| setup | latency | speed-up |
+|------------------------------|---------|----------|
+| baseline | 6.37s | x1 |
+| distilled | 4.18s | x1.52 |
+| distilled + tiny autoencoder | 3.83s | x1.66 |
+
+Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model.
+
+```py
+from diffusers import StableDiffusionPipeline
+import torch
+
+distilled = StableDiffusionPipeline.from_pretrained(
+    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
+).to("cuda")
+prompt = "a golden vase with different flowers"
+generator = torch.manual_seed(2023)
+image = distilled(prompt, num_inference_steps=25, generator=generator).images[0]
+image
+```
+
+
+[figure: generated image comparison, captions "original Stable Diffusion" and "distilled Stable Diffusion"]
+
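The side-by-side figure above pairs the distilled output with an image from the full-size model, which the updated guide no longer loads explicitly. As a hedged illustration (not part of the patch), the baseline image can be reproduced with the same prompt and seed using the CompVis/stable-diffusion-v1-4 checkpoint that the removed guide relied on; the checkpoint choice and output filename here are assumptions made for the sketch.

```py
import torch
from diffusers import StableDiffusionPipeline

# Full-size Stable Diffusion baseline for the comparison figure above.
original = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Same prompt and seed as the distilled example so the images are comparable.
generator = torch.manual_seed(2023)
image = original(
    "a golden vase with different flowers",
    num_inference_steps=25,
    generator=generator,
).images[0]
image.save("original_vase.png")
```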
+
+### Tiny AutoEncoder
+
+To speed inference up even more, replace the autoencoder with a [distilled version](https://huggingface.co/sayakpaul/taesdxl-diffusers) of it.
+
+```py
+import torch
+from diffusers import AutoencoderTiny, StableDiffusionPipeline
+
+distilled = StableDiffusionPipeline.from_pretrained(
+    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
+).to("cuda")
+distilled.vae = AutoencoderTiny.from_pretrained(
+    "sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
+).to("cuda")
+
+prompt = "a golden vase with different flowers"
+generator = torch.manual_seed(2023)
+image = distilled(prompt, num_inference_steps=25, generator=generator).images[0]
+image
+```
-Learn more about in the [Distilled Stable Diffusion inference](../using-diffusers/distilled_sd) guide!
+
+
+[figure: generated image, caption "distilled Stable Diffusion + Tiny AutoEncoder"]
+
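Taken together, the optimizations documented in the updated fp16.md can be stacked in a single pipeline. The sketch below is illustrative rather than part of the patch: it combines tf32 matmuls, half-precision weights, the distilled nota-ai/bk-sdm-small checkpoint, and the tiny autoencoder from the sections above; the actual speed-up depends on the GPU.

```py
import torch
from diffusers import AutoencoderTiny, StableDiffusionPipeline

# TensorFloat-32 matmuls (Ampere or newer GPUs), as enabled earlier in the guide.
torch.backends.cuda.matmul.allow_tf32 = True

# Distilled UNet loaded directly in half precision.
pipe = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Swap in the tiny autoencoder for faster latent decoding.
pipe.vae = AutoencoderTiny.from_pretrained(
    "sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

generator = torch.manual_seed(2023)
image = pipe(
    "a golden vase with different flowers",
    num_inference_steps=25,
    generator=generator,
).images[0]
image.save("distilled_vase.png")
```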
diff --git a/docs/source/en/using-diffusers/distilled_sd.md b/docs/source/en/using-diffusers/distilled_sd.md
deleted file mode 100644
index c4c5f7ad1998..000000000000
--- a/docs/source/en/using-diffusers/distilled_sd.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
-# Distilled Stable Diffusion inference
-
-[[open-in-colab]]
-
-Stable Diffusion inference can be a computationally intensive process because it must iteratively denoise the latents to generate an image. To reduce the computational burden, you can use a *distilled* version of the Stable Diffusion model from [Nota AI](https://huggingface.co/nota-ai). The distilled version of their Stable Diffusion model eliminates some of the residual and attention blocks from the UNet, reducing the model size by 51% and improving latency on CPU/GPU by 43%.
-
-
-
-Read this [blog post](https://huggingface.co/blog/sd_distillation) to learn more about how knowledge distillation training works to produce a faster, smaller, and cheaper generative model.
-
-
-
-Let's load the distilled Stable Diffusion model and compare it against the original Stable Diffusion model:
-
-```py
-from diffusers import StableDiffusionPipeline
-import torch
-
-distilled = StableDiffusionPipeline.from_pretrained(
-    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True,
-).to("cuda")
-
-original = StableDiffusionPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True,
-).to("cuda")
-```
-
-Given a prompt, get the inference time for the original model:
-
-```py
-import time
-
-seed = 2023
-generator = torch.manual_seed(seed)
-
-NUM_ITERS_TO_RUN = 3
-NUM_INFERENCE_STEPS = 25
-NUM_IMAGES_PER_PROMPT = 4
-
-prompt = "a golden vase with different flowers"
-
-start = time.time_ns()
-for _ in range(NUM_ITERS_TO_RUN):
-    images = original(
-        prompt,
-        num_inference_steps=NUM_INFERENCE_STEPS,
-        generator=generator,
-        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
-    ).images
-end = time.time_ns()
-original_sd = f"{(end - start) / 1e6:.1f}"
-
-print(f"Execution time -- {original_sd} ms\n")
-"Execution time -- 45781.5 ms"
-```
-
-Time the distilled model inference:
-
-```py
-start = time.time_ns()
-for _ in range(NUM_ITERS_TO_RUN):
-    images = distilled(
-        prompt,
-        num_inference_steps=NUM_INFERENCE_STEPS,
-        generator=generator,
-        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
-    ).images
-end = time.time_ns()
-
-distilled_sd = f"{(end - start) / 1e6:.1f}"
-print(f"Execution time -- {distilled_sd} ms\n")
-"Execution time -- 29884.2 ms"
-```
-
-
-[figure: generated image comparison, captions "original Stable Diffusion (45781.5 ms)" and "distilled Stable Diffusion (29884.2 ms)"]
-
-
-## Tiny AutoEncoder
-
-To speed inference up even more, use a tiny distilled version of the [Stable Diffusion VAE](https://huggingface.co/sayakpaul/taesdxl-diffusers) to denoise the latents into images. Replace the VAE in the distilled Stable Diffusion model with the tiny VAE:
-
-```py
-from diffusers import AutoencoderTiny
-
-distilled.vae = AutoencoderTiny.from_pretrained(
-    "sayakpaul/taesd-diffusers", torch_dtype=torch.float16, use_safetensors=True,
-).to("cuda")
-```
-
-Time the distilled model and distilled VAE inference:
-
-```py
-start = time.time_ns()
-for _ in range(NUM_ITERS_TO_RUN):
-    images = distilled(
-        prompt,
-        num_inference_steps=NUM_INFERENCE_STEPS,
-        generator=generator,
-        num_images_per_prompt=NUM_IMAGES_PER_PROMPT
-    ).images
-end = time.time_ns()
-
-distilled_tiny_sd = f"{(end - start) / 1e6:.1f}"
-print(f"Execution time -- {distilled_tiny_sd} ms\n")
-"Execution time -- 27165.7 ms"
-```
-
-
-[figure: generated image, caption "distilled Stable Diffusion + Tiny AutoEncoder (27165.7 ms)"]
-
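For reference, the millisecond figures quoted in the removed guide came from repeated timed generations. A condensed sketch of that measurement loop is shown below; it is not part of the patch, and it assumes a GPU with enough memory to hold both the original and distilled pipelines at once.

```py
import time

import torch
from diffusers import StableDiffusionPipeline

prompt = "a golden vase with different flowers"

def benchmark(pipe, n_iters=3, steps=25, images_per_prompt=4):
    # Time several full generations, mirroring the removed guide's loop.
    generator = torch.manual_seed(2023)
    start = time.perf_counter()
    for _ in range(n_iters):
        pipe(
            prompt,
            num_inference_steps=steps,
            generator=generator,
            num_images_per_prompt=images_per_prompt,
        )
    return (time.perf_counter() - start) * 1000  # milliseconds

original = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
distilled = StableDiffusionPipeline.from_pretrained(
    "nota-ai/bk-sdm-small", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

print(f"original:  {benchmark(original):.1f} ms")
print(f"distilled: {benchmark(distilled):.1f} ms")
```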