OriLib committed on
Commit d29ef6c
1 Parent(s): 69b04c4

Upload 4 files

replace_bg/model/controlnet.py ADDED
@@ -0,0 +1,871 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ from torch import nn
19
+ from torch.nn import functional as F
20
+
21
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
22
+ from diffusers.loaders import FromOriginalControlNetMixin
23
+ from diffusers.utils import BaseOutput, logging
24
+ from diffusers.models.attention_processor import (
25
+ ADDED_KV_ATTENTION_PROCESSORS,
26
+ CROSS_ATTENTION_PROCESSORS,
27
+ AttentionProcessor,
28
+ AttnAddedKVProcessor,
29
+ AttnProcessor,
30
+ )
31
+ from diffusers.models.embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
32
+ from diffusers.models.modeling_utils import ModelMixin
33
+ from diffusers.models.unets.unet_2d_blocks import (
34
+ CrossAttnDownBlock2D,
35
+ DownBlock2D,
36
+ UNetMidBlock2D,
37
+ UNetMidBlock2DCrossAttn,
38
+ get_down_block,
39
+ )
40
+ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
41
+
42
+
43
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
44
+
45
+
46
+ @dataclass
47
+ class ControlNetOutput(BaseOutput):
48
+ """
49
+ The output of [`ControlNetModel`].
50
+
51
+ Args:
52
+ down_block_res_samples (`tuple[torch.Tensor]`):
53
+ A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
54
+ be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be
55
+ used to condition the original UNet's downsampling activations.
56
+ mid_down_block_re_sample (`torch.Tensor`):
57
+ The activation of the midde block (the lowest sample resolution). Each tensor should be of shape
58
+ `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
59
+ Output can be used to condition the original UNet's middle block activation.
60
+ """
61
+
62
+ down_block_res_samples: Tuple[torch.Tensor]
63
+ mid_block_res_sample: torch.Tensor
64
+
65
+
66
+ class ControlNetConditioningEmbedding(nn.Module):
67
+ """
68
+ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
69
+ [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
70
+ training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
71
+ convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
72
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
73
+ model) to encode image-space conditions ... into feature maps ..."
74
+ """
75
+
76
+ def __init__(
77
+ self,
78
+ conditioning_embedding_channels: int,
79
+ conditioning_channels: int = 5,  # updated from 3 to 5 conditioning channels
80
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
81
+ ):
82
+ super().__init__()
83
+
84
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
85
+
86
+ self.blocks = nn.ModuleList([])
87
+
88
+ for i in range(len(block_out_channels) - 1):
89
+ channel_in = block_out_channels[i]
90
+ channel_out = block_out_channels[i + 1]
91
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
92
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=1))  # stride updated from 2 to 1 (no spatial downsampling)
93
+
94
+ self.conv_out = zero_module(
95
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
96
+ )
97
+
98
+ def forward(self, conditioning):
99
+ embedding = self.conv_in(conditioning)
100
+ embedding = F.silu(embedding)
101
+
102
+ for block in self.blocks:
103
+ embedding = block(embedding)
104
+ embedding = F.silu(embedding)
105
+
106
+ embedding = self.conv_out(embedding)
107
+
108
+ return embedding
109
+
110
+
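+ # Illustrative sketch (not part of the original file): ControlNetConditioningEmbedding above expects a
+ # conditioning tensor with `conditioning_channels` channels (5 in this repo). A minimal, hypothetical way
+ # to assemble such an input — the names `rgb_cond` and `extra_cond` are assumptions, not names used by
+ # this codebase — could look like:
+ #
+ #   import torch
+ #   rgb_cond = torch.rand(1, 3, 512, 512)    # e.g. an RGB conditioning image in [0, 1]
+ #   extra_cond = torch.rand(1, 2, 512, 512)  # e.g. two additional conditioning maps (mask-like channels)
+ #   controlnet_cond = torch.cat([rgb_cond, extra_cond], dim=1)  # shape (1, 5, 512, 512)
+ #   embedder = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
+ #   embedding = embedder(controlnet_cond)    # projected to the UNet's first-block channel width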
111
+ class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
112
+ """
113
+ A ControlNet model.
114
+
115
+ Args:
116
+ in_channels (`int`, defaults to 4):
117
+ The number of channels in the input sample.
118
+ flip_sin_to_cos (`bool`, defaults to `True`):
119
+ Whether to flip the sin to cos in the time embedding.
120
+ freq_shift (`int`, defaults to 0):
121
+ The frequency shift to apply to the time embedding.
122
+ down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
123
+ The tuple of downsample blocks to use.
124
+ only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
125
+ block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
126
+ The tuple of output channels for each block.
127
+ layers_per_block (`int`, defaults to 2):
128
+ The number of layers per block.
129
+ downsample_padding (`int`, defaults to 1):
130
+ The padding to use for the downsampling convolution.
131
+ mid_block_scale_factor (`float`, defaults to 1):
132
+ The scale factor to use for the mid block.
133
+ act_fn (`str`, defaults to "silu"):
134
+ The activation function to use.
135
+ norm_num_groups (`int`, *optional*, defaults to 32):
136
+ The number of groups to use for the normalization. If None, normalization and activation layers is skipped
137
+ in post-processing.
138
+ norm_eps (`float`, defaults to 1e-5):
139
+ The epsilon to use for the normalization.
140
+ cross_attention_dim (`int`, defaults to 1280):
141
+ The dimension of the cross attention features.
142
+ transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
143
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
144
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
145
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
146
+ encoder_hid_dim (`int`, *optional*, defaults to None):
147
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
148
+ dimension to `cross_attention_dim`.
149
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
150
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
151
+ embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
152
+ attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
153
+ The dimension of the attention heads.
154
+ use_linear_projection (`bool`, defaults to `False`):
155
+ class_embed_type (`str`, *optional*, defaults to `None`):
156
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
157
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
158
+ addition_embed_type (`str`, *optional*, defaults to `None`):
159
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
160
+ "text". "text" will use the `TextTimeEmbedding` layer.
161
+ num_class_embeds (`int`, *optional*, defaults to 0):
162
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
163
+ class conditioning with `class_embed_type` equal to `None`.
164
+ upcast_attention (`bool`, defaults to `False`):
165
+ resnet_time_scale_shift (`str`, defaults to `"default"`):
166
+ Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
167
+ projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
168
+ The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
169
+ `class_embed_type="projection"`.
170
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
171
+ The channel order of the conditioning image. Will convert to `rgb` if it's `bgr`.
172
+ conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
173
+ The tuple of output channels for each block in the `conditioning_embedding` layer.
174
+ global_pool_conditions (`bool`, defaults to `False`):
175
+ TODO(Patrick) - unused parameter.
176
+ addition_embed_type_num_heads (`int`, defaults to 64):
177
+ The number of heads to use for the `TextTimeEmbedding` layer.
178
+ """
179
+
180
+ _supports_gradient_checkpointing = True
181
+
182
+ @register_to_config
183
+ def __init__(
184
+ self,
185
+ in_channels: int = 4,
186
+ conditioning_channels: int = 3,
187
+ flip_sin_to_cos: bool = True,
188
+ freq_shift: int = 0,
189
+ down_block_types: Tuple[str, ...] = (
190
+ "CrossAttnDownBlock2D",
191
+ "CrossAttnDownBlock2D",
192
+ "CrossAttnDownBlock2D",
193
+ "DownBlock2D",
194
+ ),
195
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
196
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
197
+ block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
198
+ layers_per_block: int = 2,
199
+ downsample_padding: int = 1,
200
+ mid_block_scale_factor: float = 1,
201
+ act_fn: str = "silu",
202
+ norm_num_groups: Optional[int] = 32,
203
+ norm_eps: float = 1e-5,
204
+ cross_attention_dim: int = 1280,
205
+ transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
206
+ encoder_hid_dim: Optional[int] = None,
207
+ encoder_hid_dim_type: Optional[str] = None,
208
+ attention_head_dim: Union[int, Tuple[int, ...]] = 8,
209
+ num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
210
+ use_linear_projection: bool = False,
211
+ class_embed_type: Optional[str] = None,
212
+ addition_embed_type: Optional[str] = None,
213
+ addition_time_embed_dim: Optional[int] = None,
214
+ num_class_embeds: Optional[int] = None,
215
+ upcast_attention: bool = False,
216
+ resnet_time_scale_shift: str = "default",
217
+ projection_class_embeddings_input_dim: Optional[int] = None,
218
+ controlnet_conditioning_channel_order: str = "rgb",
219
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
220
+ global_pool_conditions: bool = False,
221
+ addition_embed_type_num_heads: int = 64,
222
+ ):
223
+ super().__init__()
224
+
225
+ # If `num_attention_heads` is not defined (which is the case for most models)
226
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
227
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
228
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
229
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
230
+ # which is why we correct for the naming here.
231
+ num_attention_heads = num_attention_heads or attention_head_dim
232
+
233
+ # Check inputs
234
+ if len(block_out_channels) != len(down_block_types):
235
+ raise ValueError(
236
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
237
+ )
238
+
239
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
240
+ raise ValueError(
241
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
242
+ )
243
+
244
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
245
+ raise ValueError(
246
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
247
+ )
248
+
249
+ if isinstance(transformer_layers_per_block, int):
250
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
251
+
252
+ # input
253
+ conv_in_kernel = 3
254
+ conv_in_padding = (conv_in_kernel - 1) // 2
255
+ self.conv_in = nn.Conv2d(
256
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
257
+ )
258
+
259
+ # time
260
+ time_embed_dim = block_out_channels[0] * 4
261
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
262
+ timestep_input_dim = block_out_channels[0]
263
+ self.time_embedding = TimestepEmbedding(
264
+ timestep_input_dim,
265
+ time_embed_dim,
266
+ act_fn=act_fn,
267
+ )
268
+
269
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
270
+ encoder_hid_dim_type = "text_proj"
271
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
272
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
273
+
274
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
275
+ raise ValueError(
276
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
277
+ )
278
+
279
+ if encoder_hid_dim_type == "text_proj":
280
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
281
+ elif encoder_hid_dim_type == "text_image_proj":
282
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
283
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
284
+ # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
285
+ self.encoder_hid_proj = TextImageProjection(
286
+ text_embed_dim=encoder_hid_dim,
287
+ image_embed_dim=cross_attention_dim,
288
+ cross_attention_dim=cross_attention_dim,
289
+ )
290
+
291
+ elif encoder_hid_dim_type is not None:
292
+ raise ValueError(
293
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
294
+ )
295
+ else:
296
+ self.encoder_hid_proj = None
297
+
298
+ # class embedding
299
+ if class_embed_type is None and num_class_embeds is not None:
300
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
301
+ elif class_embed_type == "timestep":
302
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
303
+ elif class_embed_type == "identity":
304
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
305
+ elif class_embed_type == "projection":
306
+ if projection_class_embeddings_input_dim is None:
307
+ raise ValueError(
308
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
309
+ )
310
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
311
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
312
+ # 2. it projects from an arbitrary input dimension.
313
+ #
314
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
315
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
316
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
317
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
318
+ else:
319
+ self.class_embedding = None
320
+
321
+ if addition_embed_type == "text":
322
+ if encoder_hid_dim is not None:
323
+ text_time_embedding_from_dim = encoder_hid_dim
324
+ else:
325
+ text_time_embedding_from_dim = cross_attention_dim
326
+
327
+ self.add_embedding = TextTimeEmbedding(
328
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
329
+ )
330
+ elif addition_embed_type == "text_image":
331
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
332
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
333
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
334
+ self.add_embedding = TextImageTimeEmbedding(
335
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
336
+ )
337
+ elif addition_embed_type == "text_time":
338
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
339
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
340
+
341
+ elif addition_embed_type is not None:
342
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
343
+
344
+ # control net conditioning embedding
345
+ self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
346
+ conditioning_embedding_channels=block_out_channels[0],
347
+ block_out_channels=conditioning_embedding_out_channels,
348
+ conditioning_channels=conditioning_channels,
349
+ )
350
+
351
+ self.down_blocks = nn.ModuleList([])
352
+ self.controlnet_down_blocks = nn.ModuleList([])
353
+
354
+ if isinstance(only_cross_attention, bool):
355
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
356
+
357
+ if isinstance(attention_head_dim, int):
358
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
359
+
360
+ if isinstance(num_attention_heads, int):
361
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
362
+
363
+ # down
364
+ output_channel = block_out_channels[0]
365
+
366
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
367
+ controlnet_block = zero_module(controlnet_block)
368
+ self.controlnet_down_blocks.append(controlnet_block)
369
+
370
+ for i, down_block_type in enumerate(down_block_types):
371
+ input_channel = output_channel
372
+ output_channel = block_out_channels[i]
373
+ is_final_block = i == len(block_out_channels) - 1
374
+
375
+ down_block = get_down_block(
376
+ down_block_type,
377
+ num_layers=layers_per_block,
378
+ transformer_layers_per_block=transformer_layers_per_block[i],
379
+ in_channels=input_channel,
380
+ out_channels=output_channel,
381
+ temb_channels=time_embed_dim,
382
+ add_downsample=not is_final_block,
383
+ resnet_eps=norm_eps,
384
+ resnet_act_fn=act_fn,
385
+ resnet_groups=norm_num_groups,
386
+ cross_attention_dim=cross_attention_dim,
387
+ num_attention_heads=num_attention_heads[i],
388
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
389
+ downsample_padding=downsample_padding,
390
+ use_linear_projection=use_linear_projection,
391
+ only_cross_attention=only_cross_attention[i],
392
+ upcast_attention=upcast_attention,
393
+ resnet_time_scale_shift=resnet_time_scale_shift,
394
+ )
395
+ self.down_blocks.append(down_block)
396
+
397
+ for _ in range(layers_per_block):
398
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
399
+ controlnet_block = zero_module(controlnet_block)
400
+ self.controlnet_down_blocks.append(controlnet_block)
401
+
402
+ if not is_final_block:
403
+ controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
404
+ controlnet_block = zero_module(controlnet_block)
405
+ self.controlnet_down_blocks.append(controlnet_block)
406
+
407
+ # mid
408
+ mid_block_channel = block_out_channels[-1]
409
+
410
+ controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
411
+ controlnet_block = zero_module(controlnet_block)
412
+ self.controlnet_mid_block = controlnet_block
413
+
414
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
415
+ self.mid_block = UNetMidBlock2DCrossAttn(
416
+ transformer_layers_per_block=transformer_layers_per_block[-1],
417
+ in_channels=mid_block_channel,
418
+ temb_channels=time_embed_dim,
419
+ resnet_eps=norm_eps,
420
+ resnet_act_fn=act_fn,
421
+ output_scale_factor=mid_block_scale_factor,
422
+ resnet_time_scale_shift=resnet_time_scale_shift,
423
+ cross_attention_dim=cross_attention_dim,
424
+ num_attention_heads=num_attention_heads[-1],
425
+ resnet_groups=norm_num_groups,
426
+ use_linear_projection=use_linear_projection,
427
+ upcast_attention=upcast_attention,
428
+ )
429
+ elif mid_block_type == "UNetMidBlock2D":
430
+ self.mid_block = UNetMidBlock2D(
431
+ in_channels=block_out_channels[-1],
432
+ temb_channels=time_embed_dim,
433
+ num_layers=0,
434
+ resnet_eps=norm_eps,
435
+ resnet_act_fn=act_fn,
436
+ output_scale_factor=mid_block_scale_factor,
437
+ resnet_groups=norm_num_groups,
438
+ resnet_time_scale_shift=resnet_time_scale_shift,
439
+ add_attention=False,
440
+ )
441
+ else:
442
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
443
+
444
+ @classmethod
445
+ def from_unet(
446
+ cls,
447
+ unet: UNet2DConditionModel,
448
+ controlnet_conditioning_channel_order: str = "rgb",
449
+ conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
450
+ load_weights_from_unet: bool = True,
451
+ conditioning_channels: int = 3,
452
+ ):
453
+ r"""
454
+ Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].
455
+
456
+ Parameters:
457
+ unet (`UNet2DConditionModel`):
458
+ The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
459
+ where applicable.
460
+ """
461
+ transformer_layers_per_block = (
462
+ unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
463
+ )
464
+ encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
465
+ encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
466
+ addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
467
+ addition_time_embed_dim = (
468
+ unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
469
+ )
470
+
471
+ controlnet = cls(
472
+ encoder_hid_dim=encoder_hid_dim,
473
+ encoder_hid_dim_type=encoder_hid_dim_type,
474
+ addition_embed_type=addition_embed_type,
475
+ addition_time_embed_dim=addition_time_embed_dim,
476
+ transformer_layers_per_block=transformer_layers_per_block,
477
+ in_channels=unet.config.in_channels,
478
+ flip_sin_to_cos=unet.config.flip_sin_to_cos,
479
+ freq_shift=unet.config.freq_shift,
480
+ down_block_types=unet.config.down_block_types,
481
+ only_cross_attention=unet.config.only_cross_attention,
482
+ block_out_channels=unet.config.block_out_channels,
483
+ layers_per_block=unet.config.layers_per_block,
484
+ downsample_padding=unet.config.downsample_padding,
485
+ mid_block_scale_factor=unet.config.mid_block_scale_factor,
486
+ act_fn=unet.config.act_fn,
487
+ norm_num_groups=unet.config.norm_num_groups,
488
+ norm_eps=unet.config.norm_eps,
489
+ cross_attention_dim=unet.config.cross_attention_dim,
490
+ attention_head_dim=unet.config.attention_head_dim,
491
+ num_attention_heads=unet.config.num_attention_heads,
492
+ use_linear_projection=unet.config.use_linear_projection,
493
+ class_embed_type=unet.config.class_embed_type,
494
+ num_class_embeds=unet.config.num_class_embeds,
495
+ upcast_attention=unet.config.upcast_attention,
496
+ resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
497
+ projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
498
+ mid_block_type=unet.config.mid_block_type,
499
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
500
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
501
+ conditioning_channels=conditioning_channels,
502
+ )
503
+
504
+ if load_weights_from_unet:
505
+ controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
506
+ controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
507
+ controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
508
+
509
+ if controlnet.class_embedding:
510
+ controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
511
+
512
+ if hasattr(controlnet, "add_embedding"):
513
+ controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict())
514
+
515
+ controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
516
+ controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
517
+
518
+ return controlnet
519
+
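+ # Illustrative sketch (not part of the original file): a hypothetical way to build this ControlNet from an
+ # existing Stable Diffusion UNet. The checkpoint id below is only a placeholder assumption.
+ #
+ #   from diffusers import UNet2DConditionModel
+ #   unet = UNet2DConditionModel.from_pretrained("some/stable-diffusion-checkpoint", subfolder="unet")
+ #   controlnet = ControlNetModel.from_unet(unet, conditioning_channels=5)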
520
+ @property
521
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
522
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
523
+ r"""
524
+ Returns:
525
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
526
+ indexed by its weight name.
527
+ """
528
+ # set recursively
529
+ processors = {}
530
+
531
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
532
+ if hasattr(module, "get_processor"):
533
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
534
+
535
+ for sub_name, child in module.named_children():
536
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
537
+
538
+ return processors
539
+
540
+ for name, module in self.named_children():
541
+ fn_recursive_add_processors(name, module, processors)
542
+
543
+ return processors
544
+
545
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
546
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
547
+ r"""
548
+ Sets the attention processor to use to compute attention.
549
+
550
+ Parameters:
551
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
552
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
553
+ for **all** `Attention` layers.
554
+
555
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
556
+ processor. This is strongly recommended when setting trainable attention processors.
557
+
558
+ """
559
+ count = len(self.attn_processors.keys())
560
+
561
+ if isinstance(processor, dict) and len(processor) != count:
562
+ raise ValueError(
563
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
564
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
565
+ )
566
+
567
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
568
+ if hasattr(module, "set_processor"):
569
+ if not isinstance(processor, dict):
570
+ module.set_processor(processor)
571
+ else:
572
+ module.set_processor(processor.pop(f"{name}.processor"))
573
+
574
+ for sub_name, child in module.named_children():
575
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
576
+
577
+ for name, module in self.named_children():
578
+ fn_recursive_attn_processor(name, module, processor)
579
+
580
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
581
+ def set_default_attn_processor(self):
582
+ """
583
+ Disables custom attention processors and sets the default attention implementation.
584
+ """
585
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
586
+ processor = AttnAddedKVProcessor()
587
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
588
+ processor = AttnProcessor()
589
+ else:
590
+ raise ValueError(
591
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
592
+ )
593
+
594
+ self.set_attn_processor(processor)
595
+
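+ # Illustrative sketch (not part of the original file): swapping attention processors on an instantiated
+ # model, e.g. to restore the default implementation after experimenting with a custom processor
+ # (`controlnet` is a placeholder instance):
+ #
+ #   controlnet.set_attn_processor(AttnProcessor())  # same processor for every attention layer
+ #   controlnet.set_default_attn_processor()         # or pick the default automatically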
596
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attention_slice
597
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
598
+ r"""
599
+ Enable sliced attention computation.
600
+
601
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
602
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
603
+
604
+ Args:
605
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
606
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
607
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
608
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
609
+ must be a multiple of `slice_size`.
610
+ """
611
+ sliceable_head_dims = []
612
+
613
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
614
+ if hasattr(module, "set_attention_slice"):
615
+ sliceable_head_dims.append(module.sliceable_head_dim)
616
+
617
+ for child in module.children():
618
+ fn_recursive_retrieve_sliceable_dims(child)
619
+
620
+ # retrieve number of attention layers
621
+ for module in self.children():
622
+ fn_recursive_retrieve_sliceable_dims(module)
623
+
624
+ num_sliceable_layers = len(sliceable_head_dims)
625
+
626
+ if slice_size == "auto":
627
+ # half the attention head size is usually a good trade-off between
628
+ # speed and memory
629
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
630
+ elif slice_size == "max":
631
+ # make smallest slice possible
632
+ slice_size = num_sliceable_layers * [1]
633
+
634
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
635
+
636
+ if len(slice_size) != len(sliceable_head_dims):
637
+ raise ValueError(
638
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
639
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
640
+ )
641
+
642
+ for i in range(len(slice_size)):
643
+ size = slice_size[i]
644
+ dim = sliceable_head_dims[i]
645
+ if size is not None and size > dim:
646
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
647
+
648
+ # Recursively walk through all the children.
649
+ # Any children which exposes the set_attention_slice method
650
+ # gets the message
651
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
652
+ if hasattr(module, "set_attention_slice"):
653
+ module.set_attention_slice(slice_size.pop())
654
+
655
+ for child in module.children():
656
+ fn_recursive_set_attention_slice(child, slice_size)
657
+
658
+ reversed_slice_size = list(reversed(slice_size))
659
+ for module in self.children():
660
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
661
+
662
+ def _set_gradient_checkpointing(self, module, value: bool = False) -> None:
663
+ if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
664
+ module.gradient_checkpointing = value
665
+
666
+ def forward(
667
+ self,
668
+ sample: torch.FloatTensor,
669
+ timestep: Union[torch.Tensor, float, int],
670
+ encoder_hidden_states: torch.Tensor,
671
+ controlnet_cond: torch.FloatTensor,
672
+ conditioning_scale: float = 1.0,
673
+ class_labels: Optional[torch.Tensor] = None,
674
+ timestep_cond: Optional[torch.Tensor] = None,
675
+ attention_mask: Optional[torch.Tensor] = None,
676
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
677
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
678
+ guess_mode: bool = False,
679
+ return_dict: bool = True,
680
+ ) -> Union[ControlNetOutput, Tuple[Tuple[torch.FloatTensor, ...], torch.FloatTensor]]:
681
+ """
682
+ The [`ControlNetModel`] forward method.
683
+
684
+ Args:
685
+ sample (`torch.FloatTensor`):
686
+ The noisy input tensor.
687
+ timestep (`Union[torch.Tensor, float, int]`):
688
+ The number of timesteps to denoise an input.
689
+ encoder_hidden_states (`torch.Tensor`):
690
+ The encoder hidden states.
691
+ controlnet_cond (`torch.FloatTensor`):
692
+ The conditioning input tensor of shape `(batch_size, conditioning_channels, height, width)`.
693
+ conditioning_scale (`float`, defaults to `1.0`):
694
+ The scale factor for ControlNet outputs.
695
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
696
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
697
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
698
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
699
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
700
+ embeddings.
701
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
702
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
703
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
704
+ negative values to the attention scores corresponding to "discard" tokens.
705
+ added_cond_kwargs (`dict`):
706
+ Additional conditions for the Stable Diffusion XL UNet.
707
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
708
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
709
+ guess_mode (`bool`, defaults to `False`):
710
+ In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if
711
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
712
+ return_dict (`bool`, defaults to `True`):
713
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
714
+
715
+ Returns:
716
+ [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
717
+ If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
718
+ returned where the first element is the sample tensor.
719
+ """
720
+ # check channel order
721
+ channel_order = self.config.controlnet_conditioning_channel_order
722
+
723
+ if channel_order == "rgb":
724
+ # in rgb order by default
725
+ ...
726
+ elif channel_order == "bgr":
727
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
728
+ else:
729
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
730
+
731
+ # prepare attention_mask
732
+ if attention_mask is not None:
733
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
734
+ attention_mask = attention_mask.unsqueeze(1)
735
+
736
+ # 1. time
737
+ timesteps = timestep
738
+ if not torch.is_tensor(timesteps):
739
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
740
+ # This would be a good case for the `match` statement (Python 3.10+)
741
+ is_mps = sample.device.type == "mps"
742
+ if isinstance(timestep, float):
743
+ dtype = torch.float32 if is_mps else torch.float64
744
+ else:
745
+ dtype = torch.int32 if is_mps else torch.int64
746
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
747
+ elif len(timesteps.shape) == 0:
748
+ timesteps = timesteps[None].to(sample.device)
749
+
750
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
751
+ timesteps = timesteps.expand(sample.shape[0])
752
+
753
+ t_emb = self.time_proj(timesteps)
754
+
755
+ # timesteps does not contain any weights and will always return f32 tensors
756
+ # but time_embedding might actually be running in fp16. so we need to cast here.
757
+ # there might be better ways to encapsulate this.
758
+ t_emb = t_emb.to(dtype=sample.dtype)
759
+
760
+ emb = self.time_embedding(t_emb, timestep_cond)
761
+ aug_emb = None
762
+
763
+ if self.class_embedding is not None:
764
+ if class_labels is None:
765
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
766
+
767
+ if self.config.class_embed_type == "timestep":
768
+ class_labels = self.time_proj(class_labels)
769
+
770
+ class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
771
+ emb = emb + class_emb
772
+
773
+ if self.config.addition_embed_type is not None:
774
+ if self.config.addition_embed_type == "text":
775
+ aug_emb = self.add_embedding(encoder_hidden_states)
776
+
777
+ elif self.config.addition_embed_type == "text_time":
778
+ if "text_embeds" not in added_cond_kwargs:
779
+ raise ValueError(
780
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
781
+ )
782
+ text_embeds = added_cond_kwargs.get("text_embeds")
783
+ if "time_ids" not in added_cond_kwargs:
784
+ raise ValueError(
785
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
786
+ )
787
+ time_ids = added_cond_kwargs.get("time_ids")
788
+ time_embeds = self.add_time_proj(time_ids.flatten())
789
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
790
+
791
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
792
+ add_embeds = add_embeds.to(emb.dtype)
793
+ aug_emb = self.add_embedding(add_embeds)
794
+
795
+ emb = emb + aug_emb if aug_emb is not None else emb
796
+
797
+ # 2. pre-process
798
+ sample = self.conv_in(sample)
799
+
800
+ controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
801
+ sample = sample + controlnet_cond
802
+
803
+ # 3. down
804
+ down_block_res_samples = (sample,)
805
+ for downsample_block in self.down_blocks:
806
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
807
+ sample, res_samples = downsample_block(
808
+ hidden_states=sample,
809
+ temb=emb,
810
+ encoder_hidden_states=encoder_hidden_states,
811
+ attention_mask=attention_mask,
812
+ cross_attention_kwargs=cross_attention_kwargs,
813
+ )
814
+ else:
815
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
816
+
817
+ down_block_res_samples += res_samples
818
+
819
+ # 4. mid
820
+ if self.mid_block is not None:
821
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
822
+ sample = self.mid_block(
823
+ sample,
824
+ emb,
825
+ encoder_hidden_states=encoder_hidden_states,
826
+ attention_mask=attention_mask,
827
+ cross_attention_kwargs=cross_attention_kwargs,
828
+ )
829
+ else:
830
+ sample = self.mid_block(sample, emb)
831
+
832
+ # 5. Control net blocks
833
+
834
+ controlnet_down_block_res_samples = ()
835
+
836
+ for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
837
+ down_block_res_sample = controlnet_block(down_block_res_sample)
838
+ controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
839
+
840
+ down_block_res_samples = controlnet_down_block_res_samples
841
+
842
+ mid_block_res_sample = self.controlnet_mid_block(sample)
843
+
844
+ # 6. scaling
845
+ if guess_mode and not self.config.global_pool_conditions:
846
+ scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
847
+ scales = scales * conditioning_scale
848
+ down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
849
+ mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
850
+ else:
851
+ down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
852
+ mid_block_res_sample = mid_block_res_sample * conditioning_scale
853
+
854
+ if self.config.global_pool_conditions:
855
+ down_block_res_samples = [
856
+ torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
857
+ ]
858
+ mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
859
+
860
+ if not return_dict:
861
+ return (down_block_res_samples, mid_block_res_sample)
862
+
863
+ return ControlNetOutput(
864
+ down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
865
+ )
866
+
867
+
868
+ def zero_module(module):
869
+ for p in module.parameters():
870
+ nn.init.zeros_(p)
871
+ return module
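+ # Illustrative sketch (not part of the original file): the ControlNet outputs are meant to be passed to the
+ # base UNet as additional residuals. Assuming `unet`, `controlnet`, `sample`, `timestep`, `prompt_embeds`
+ # and `cond_image` are already prepared (they are placeholders here):
+ #
+ #   down_res, mid_res = controlnet(
+ #       sample, timestep, encoder_hidden_states=prompt_embeds,
+ #       controlnet_cond=cond_image, conditioning_scale=1.0, return_dict=False,
+ #   )
+ #   noise_pred = unet(
+ #       sample, timestep, encoder_hidden_states=prompt_embeds,
+ #       down_block_additional_residuals=down_res,
+ #       mid_block_additional_residual=mid_res,
+ #   ).sample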
replace_bg/model/image_processor.py ADDED
@@ -0,0 +1,991 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ import warnings
17
+ from typing import List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from PIL import Image, ImageFilter, ImageOps
24
+
25
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
26
+ from diffusers.utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate
27
+ # from .utils import CONFIG_NAME, PIL_INTERPOLATION, deprecate
28
+
29
+
30
+ PipelineImageInput = Union[
31
+ PIL.Image.Image,
32
+ np.ndarray,
33
+ torch.FloatTensor,
34
+ List[PIL.Image.Image],
35
+ List[np.ndarray],
36
+ List[torch.FloatTensor],
37
+ ]
38
+
39
+ PipelineDepthInput = PipelineImageInput
40
+
41
+
42
+ class VaeImageProcessor(ConfigMixin):
43
+ """
44
+ Image processor for VAE.
45
+
46
+ Args:
47
+ do_resize (`bool`, *optional*, defaults to `True`):
48
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`. Can accept
49
+ `height` and `width` arguments from [`image_processor.VaeImageProcessor.preprocess`] method.
50
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
51
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
52
+ resample (`str`, *optional*, defaults to `lanczos`):
53
+ Resampling filter to use when resizing the image.
54
+ do_normalize (`bool`, *optional*, defaults to `True`):
55
+ Whether to normalize the image to [-1,1].
56
+ do_binarize (`bool`, *optional*, defaults to `False`):
57
+ Whether to binarize the image to 0/1.
58
+ do_convert_rgb (`bool`, *optional*, defaults to be `False`):
59
+ Whether to convert the images to RGB format.
60
+ do_convert_grayscale (`bool`, *optional*, defaults to be `False`):
61
+ Whether to convert the images to grayscale format.
62
+ """
63
+
64
+ config_name = CONFIG_NAME
65
+
66
+ @register_to_config
67
+ def __init__(
68
+ self,
69
+ do_resize: bool = True,
70
+ vae_scale_factor: int = 8,
71
+ resample: str = "lanczos",
72
+ do_normalize: bool = True,
73
+ do_binarize: bool = False,
74
+ do_convert_rgb: bool = False,
75
+ do_convert_grayscale: bool = False,
76
+ ):
77
+ super().__init__()
78
+ if do_convert_rgb and do_convert_grayscale:
79
+ raise ValueError(
80
+ "`do_convert_rgb` and `do_convert_grayscale` can not both be set to `True`,"
81
+ " if you intended to convert the image into RGB format, please set `do_convert_grayscale = False`.",
82
+ " if you intended to convert the image into grayscale format, please set `do_convert_rgb = False`",
83
+ )
84
+ self.config.do_convert_rgb = False
85
+
86
+ @staticmethod
87
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
88
+ """
89
+ Convert a numpy image or a batch of images to a PIL image.
90
+ """
91
+ if images.ndim == 3:
92
+ images = images[None, ...]
93
+ images = (images * 255).round().astype("uint8")
94
+ if images.shape[-1] == 1:
95
+ # special case for grayscale (single channel) images
96
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
97
+ else:
98
+ pil_images = [Image.fromarray(image) for image in images]
99
+
100
+ return pil_images
101
+
102
+ @staticmethod
103
+ def pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
104
+ """
105
+ Convert a PIL image or a list of PIL images to NumPy arrays.
106
+ """
107
+ if not isinstance(images, list):
108
+ images = [images]
109
+ images = [np.array(image).astype(np.float32) / 255.0 for image in images]
110
+ images = np.stack(images, axis=0)
111
+
112
+ return images
113
+
114
+ @staticmethod
115
+ def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor:
116
+ """
117
+ Convert a NumPy image to a PyTorch tensor.
118
+ """
119
+ if images.ndim == 3:
120
+ images = images[..., None]
121
+
122
+ images = torch.from_numpy(images.transpose(0, 3, 1, 2))
123
+ return images
124
+
125
+ @staticmethod
126
+ def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray:
127
+ """
128
+ Convert a PyTorch tensor to a NumPy image.
129
+ """
130
+ images = images.cpu().permute(0, 2, 3, 1).float().numpy()
131
+ return images
132
+
133
+ @staticmethod
134
+ def normalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
135
+ """
136
+ Normalize an image array to [-1,1].
137
+ """
138
+ return 2.0 * images - 1.0
139
+
140
+ @staticmethod
141
+ def denormalize(images: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
142
+ """
143
+ Denormalize an image array to [0,1].
144
+ """
145
+ return (images / 2 + 0.5).clamp(0, 1)
146
+
147
+ @staticmethod
148
+ def convert_to_rgb(image: PIL.Image.Image) -> PIL.Image.Image:
149
+ """
150
+ Converts a PIL image to RGB format.
151
+ """
152
+ image = image.convert("RGB")
153
+
154
+ return image
155
+
156
+ @staticmethod
157
+ def convert_to_grayscale(image: PIL.Image.Image) -> PIL.Image.Image:
158
+ """
159
+ Converts a PIL image to grayscale format.
160
+ """
161
+ image = image.convert("L")
162
+
163
+ return image
164
+
165
+ @staticmethod
166
+ def blur(image: PIL.Image.Image, blur_factor: int = 4) -> PIL.Image.Image:
167
+ """
168
+ Applies Gaussian blur to an image.
169
+ """
170
+ image = image.filter(ImageFilter.GaussianBlur(blur_factor))
171
+
172
+ return image
173
+
174
+ @staticmethod
175
+ def get_crop_region(mask_image: PIL.Image.Image, width: int, height: int, pad=0):
176
+ """
177
+ Finds a rectangular region that contains all masked ares in an image, and expands region to match the aspect ratio of the original image;
178
+ for example, if user drew mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128.
179
+
180
+ Args:
181
+ mask_image (PIL.Image.Image): Mask image.
182
+ width (int): Width of the image to be processed.
183
+ height (int): Height of the image to be processed.
184
+ pad (int, optional): Padding to be added to the crop region. Defaults to 0.
185
+
186
+ Returns:
187
+ tuple: (x1, y1, x2, y2) represent a rectangular region that contains all masked ares in an image and matches the original aspect ratio.
188
+ """
189
+
190
+ mask_image = mask_image.convert("L")
191
+ mask = np.array(mask_image)
192
+
193
+ # 1. find a rectangular region that contains all masked ares in an image
194
+ h, w = mask.shape
195
+ crop_left = 0
196
+ for i in range(w):
197
+ if not (mask[:, i] == 0).all():
198
+ break
199
+ crop_left += 1
200
+
201
+ crop_right = 0
202
+ for i in reversed(range(w)):
203
+ if not (mask[:, i] == 0).all():
204
+ break
205
+ crop_right += 1
206
+
207
+ crop_top = 0
208
+ for i in range(h):
209
+ if not (mask[i] == 0).all():
210
+ break
211
+ crop_top += 1
212
+
213
+ crop_bottom = 0
214
+ for i in reversed(range(h)):
215
+ if not (mask[i] == 0).all():
216
+ break
217
+ crop_bottom += 1
218
+
219
+ # 2. add padding to the crop region
220
+ x1, y1, x2, y2 = (
221
+ int(max(crop_left - pad, 0)),
222
+ int(max(crop_top - pad, 0)),
223
+ int(min(w - crop_right + pad, w)),
224
+ int(min(h - crop_bottom + pad, h)),
225
+ )
226
+
227
+ # 3. expands crop region to match the aspect ratio of the image to be processed
228
+ ratio_crop_region = (x2 - x1) / (y2 - y1)
229
+ ratio_processing = width / height
230
+
231
+ if ratio_crop_region > ratio_processing:
232
+ desired_height = (x2 - x1) / ratio_processing
233
+ desired_height_diff = int(desired_height - (y2 - y1))
234
+ y1 -= desired_height_diff // 2
235
+ y2 += desired_height_diff - desired_height_diff // 2
236
+ if y2 >= mask_image.height:
237
+ diff = y2 - mask_image.height
238
+ y2 -= diff
239
+ y1 -= diff
240
+ if y1 < 0:
241
+ y2 -= y1
242
+ y1 -= y1
243
+ if y2 >= mask_image.height:
244
+ y2 = mask_image.height
245
+ else:
246
+ desired_width = (y2 - y1) * ratio_processing
247
+ desired_width_diff = int(desired_width - (x2 - x1))
248
+ x1 -= desired_width_diff // 2
249
+ x2 += desired_width_diff - desired_width_diff // 2
250
+ if x2 >= mask_image.width:
251
+ diff = x2 - mask_image.width
252
+ x2 -= diff
253
+ x1 -= diff
254
+ if x1 < 0:
255
+ x2 -= x1
256
+ x1 -= x1
257
+ if x2 >= mask_image.width:
258
+ x2 = mask_image.width
259
+
260
+ return x1, y1, x2, y2
261
+
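+ # Illustrative sketch (not part of the original file): `get_crop_region` can be used to crop an image/mask
+ # pair to the masked area (padded, aspect-ratio matched) before processing. `init_image` and `mask_image`
+ # are placeholder PIL images, not names from this codebase.
+ #
+ #   x1, y1, x2, y2 = VaeImageProcessor.get_crop_region(mask_image, width=512, height=512, pad=32)
+ #   image_crop = init_image.crop((x1, y1, x2, y2))
+ #   mask_crop = mask_image.crop((x1, y1, x2, y2))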
262
+ def _resize_and_fill(
263
+ self,
264
+ image: PIL.Image.Image,
265
+ width: int,
266
+ height: int,
267
+ ) -> PIL.Image.Image:
268
+ """
269
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling the empty space with data from the image.
270
+
271
+ Args:
272
+ image: The image to resize.
273
+ width: The width to resize the image to.
274
+ height: The height to resize the image to.
275
+ """
276
+
277
+ ratio = width / height
278
+ src_ratio = image.width / image.height
279
+
280
+ src_w = width if ratio < src_ratio else image.width * height // image.height
281
+ src_h = height if ratio >= src_ratio else image.height * width // image.width
282
+
283
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
284
+ res = Image.new("RGB", (width, height))
285
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
286
+
287
+ if ratio < src_ratio:
288
+ fill_height = height // 2 - src_h // 2
289
+ if fill_height > 0:
290
+ res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
291
+ res.paste(
292
+ resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)),
293
+ box=(0, fill_height + src_h),
294
+ )
295
+ elif ratio > src_ratio:
296
+ fill_width = width // 2 - src_w // 2
297
+ if fill_width > 0:
298
+ res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
299
+ res.paste(
300
+ resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)),
301
+ box=(fill_width + src_w, 0),
302
+ )
303
+
304
+ return res
305
+
306
+ def _resize_and_crop(
307
+ self,
308
+ image: PIL.Image.Image,
309
+ width: int,
310
+ height: int,
311
+ ) -> PIL.Image.Image:
312
+ """
313
+ Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
314
+
315
+ Args:
316
+ image: The image to resize.
317
+ width: The width to resize the image to.
318
+ height: The height to resize the image to.
319
+ """
320
+ ratio = width / height
321
+ src_ratio = image.width / image.height
322
+
323
+ src_w = width if ratio > src_ratio else image.width * height // image.height
324
+ src_h = height if ratio <= src_ratio else image.height * width // image.width
325
+
326
+ resized = image.resize((src_w, src_h), resample=PIL_INTERPOLATION["lanczos"])
327
+ res = Image.new("RGB", (width, height))
328
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
329
+ return res
330
+
331
+ def resize(
332
+ self,
333
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
334
+ height: int,
335
+ width: int,
336
+ resize_mode: str = "default", # "default", "fill", "crop"
337
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
338
+ """
339
+ Resize image.
340
+
341
+ Args:
342
+ image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
343
+ The image input, can be a PIL image, numpy array or pytorch tensor.
344
+ height (`int`):
345
+ The height to resize to.
346
+ width (`int`):
347
+ The width to resize to.
348
+ resize_mode (`str`, *optional*, defaults to `default`):
349
+ The resize mode to use, can be one of `default`, `fill` or `crop`. If `default`, will resize the image to fit
350
+ within the specified width and height, and it may not maintain the original aspect ratio.
351
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
352
+ within the dimensions, filling the empty space with data from the image.
353
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
354
+ within the dimensions, cropping the excess.
355
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
356
+
357
+ Returns:
358
+ `PIL.Image.Image`, `np.ndarray` or `torch.Tensor`:
359
+ The resized image.
360
+ """
361
+ if resize_mode != "default" and not isinstance(image, PIL.Image.Image):
362
+ raise ValueError(f"Only PIL image input is supported for resize_mode {resize_mode}")
363
+ if isinstance(image, PIL.Image.Image):
364
+ if resize_mode == "default":
365
+ image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample])
366
+ elif resize_mode == "fill":
367
+ image = self._resize_and_fill(image, width, height)
368
+ elif resize_mode == "crop":
369
+ image = self._resize_and_crop(image, width, height)
370
+ else:
371
+ raise ValueError(f"resize_mode {resize_mode} is not supported")
372
+
373
+ elif isinstance(image, torch.Tensor):
374
+ image = torch.nn.functional.interpolate(
375
+ image,
376
+ size=(height, width),
377
+ )
378
+ elif isinstance(image, np.ndarray):
379
+ image = self.numpy_to_pt(image)
380
+ image = torch.nn.functional.interpolate(
381
+ image,
382
+ size=(height, width),
383
+ )
384
+ image = self.pt_to_numpy(image)
385
+ return image
386
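A minimal usage sketch of the three resize modes, assuming a configured `VaeImageProcessor` instance named `image_processor` and a PIL input (the instance name and file path are illustrative):

```py
from PIL import Image

img = Image.open("input.png")  # hypothetical input image

# "default": plain resize to (width, height); the aspect ratio may change
out_default = image_processor.resize(img, height=512, width=512)

# "fill": keep the aspect ratio, center, and fill the borders with stretched edge data
out_fill = image_processor.resize(img, height=512, width=512, resize_mode="fill")

# "crop": keep the aspect ratio, center, and crop whatever falls outside the target box
out_crop = image_processor.resize(img, height=512, width=512, resize_mode="crop")
```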
+
387
+ def binarize(self, image: PIL.Image.Image) -> PIL.Image.Image:
388
+ """
389
+ Create a binary mask.
390
+
391
+ Args:
392
+ image (`PIL.Image.Image`):
393
+ The image input, should be a PIL image.
394
+
395
+ Returns:
396
+ `PIL.Image.Image`:
397
+ The binarized image. Values below 0.5 are set to 0; values of 0.5 and above are set to 1.
398
+ """
399
+ image[image < 0.5] = 0
400
+ image[image >= 0.5] = 1
401
+
402
+ return image
403
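In practice `binarize` is applied to the torch tensor produced earlier in `preprocess`, not to a PIL image; a minimal sketch of the same thresholding:

```py
import torch

mask = torch.tensor([[0.1, 0.5, 0.9]])
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# mask is now tensor([[0., 1., 1.]])
```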
+
404
+ def get_default_height_width(
405
+ self,
406
+ image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
407
+ height: Optional[int] = None,
408
+ width: Optional[int] = None,
409
+ ) -> Tuple[int, int]:
410
+ """
411
+ This function returns the height and width, rounded down to the nearest integer multiple of
412
+ `vae_scale_factor`.
413
+
414
+ Args:
415
+ image (`PIL.Image.Image`, `np.ndarray` or `torch.Tensor`):
416
+ The image input, can be a PIL image, numpy array or pytorch tensor. If it is a numpy array, it should have
417
+ shape `[batch, height, width]` or `[batch, height, width, channel]`; if it is a pytorch tensor, it should
418
+ have shape `[batch, channel, height, width]`.
419
+ height (`int`, *optional*, defaults to `None`):
420
+ The height of the preprocessed image. If `None`, will use the height of the `image` input.
421
+ width (`int`, *optional*, defaults to `None`):
422
+ The width of the preprocessed image. If `None`, will use the width of the `image` input.
423
+ """
424
+
425
+ if height is None:
426
+ if isinstance(image, PIL.Image.Image):
427
+ height = image.height
428
+ elif isinstance(image, torch.Tensor):
429
+ height = image.shape[2]
430
+ else:
431
+ height = image.shape[1]
432
+
433
+ if width is None:
434
+ if isinstance(image, PIL.Image.Image):
435
+ width = image.width
436
+ elif isinstance(image, torch.Tensor):
437
+ width = image.shape[3]
438
+ else:
439
+ width = image.shape[2]
440
+
441
+ width, height = (
442
+ x - x % self.config.vae_scale_factor for x in (width, height)
443
+ ) # resize to integer multiple of vae_scale_factor
444
+
445
+ return height, width
446
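The snapping step above simply drops the remainder modulo `vae_scale_factor`. The same arithmetic as a standalone sketch:

```py
vae_scale_factor = 8          # typical value for SD / SDXL VAEs
width, height = 1023, 770

width, height = (x - x % vae_scale_factor for x in (width, height))
print(width, height)          # 1016 768, both divisible by 8
```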
+
447
+ def preprocess(
448
+ self,
449
+ image: PipelineImageInput,
450
+ height: Optional[int] = None,
451
+ width: Optional[int] = None,
452
+ resize_mode: str = "default", # "default", "fill", "crop"
453
+ crops_coords: Optional[Tuple[int, int, int, int]] = None,
454
+ ) -> torch.Tensor:
455
+ """
456
+ Preprocess the image input.
457
+
458
+ Args:
459
+ image (`PipelineImageInput`):
460
+ The image input; accepted formats are PIL images, NumPy arrays and PyTorch tensors, as well as lists of these formats.
461
+ height (`int`, *optional*, defaults to `None`):
462
+ The height in preprocessed image. If `None`, will use the `get_default_height_width()` to get default height.
463
+ width (`int`, *optional*, defaults to `None`):
464
+ The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the default width.
465
+ resize_mode (`str`, *optional*, defaults to `default`):
466
+ The resize mode, can be one of `default`, `fill` or `crop`. If `default`, will resize the image to fit
467
+ within the specified width and height, and it may not maintain the original aspect ratio.
468
+ If `fill`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
469
+ within the dimensions, filling the empty space with data from the image.
470
+ If `crop`, will resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image
471
+ within the dimensions, cropping the excess.
472
+ Note that resize_mode `fill` and `crop` are only supported for PIL image input.
473
+ crops_coords (`Tuple[int, int, int, int]`, *optional*, defaults to `None`):
474
+ The crop coordinates to apply to the images in the batch. If `None`, the images are not cropped.
475
+ """
476
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
477
+
478
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
479
+ if self.config.do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3:
480
+ if isinstance(image, torch.Tensor):
481
+ # if image is a pytorch tensor could have 2 possible shapes:
482
+ # 1. batch x height x width: we should insert the channel dimension at position 1
483
+ # 2. channel x height x width: we should insert batch dimension at position 0,
484
+ # however, since both the channel and batch dimensions have size 1, it is equivalent to insert at position 1
485
+ # for simplicity, we insert a dimension of size 1 at position 1 for both cases
486
+ image = image.unsqueeze(1)
487
+ else:
488
+ # if it is a numpy array, it could have 2 possible shapes:
489
+ # 1. batch x height x width: insert channel dimension on last position
490
+ # 2. height x width x channel: insert batch dimension on first position
491
+ if image.shape[-1] == 1:
492
+ image = np.expand_dims(image, axis=0)
493
+ else:
494
+ image = np.expand_dims(image, axis=-1)
495
+
496
+ if isinstance(image, supported_formats):
497
+ image = [image]
498
+ elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)):
499
+ raise ValueError(
500
+ f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support {', '.join(str(x) for x in supported_formats)}"
501
+ )
502
+
503
+ if isinstance(image[0], PIL.Image.Image):
504
+ if crops_coords is not None:
505
+ image = [i.crop(crops_coords) for i in image]
506
+ if self.config.do_resize:
507
+ height, width = self.get_default_height_width(image[0], height, width)
508
+ image = [self.resize(i, height, width, resize_mode=resize_mode) for i in image]
509
+ if self.config.do_convert_rgb:
510
+ image = [self.convert_to_rgb(i) for i in image]
511
+ elif self.config.do_convert_grayscale:
512
+ image = [self.convert_to_grayscale(i) for i in image]
513
+ image = self.pil_to_numpy(image) # to np
514
+ image = self.numpy_to_pt(image) # to pt
515
+
516
+ elif isinstance(image[0], np.ndarray):
517
+ image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
518
+
519
+ image = self.numpy_to_pt(image)
520
+
521
+ height, width = self.get_default_height_width(image, height, width)
522
+ if self.config.do_resize:
523
+ image = self.resize(image, height, width)
524
+
525
+ elif isinstance(image[0], torch.Tensor):
526
+ image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
527
+
528
+ if self.config.do_convert_grayscale and image.ndim == 3:
529
+ image = image.unsqueeze(1)
530
+
531
+ channel = image.shape[1]
532
+ # don't need any preprocess if the image is latents
533
+ if channel >= 4:
534
+ return image
535
+
536
+ height, width = self.get_default_height_width(image, height, width)
537
+ if self.config.do_resize:
538
+ image = self.resize(image, height, width)
539
+
540
+ # expected range [0,1], normalize to [-1,1]
541
+ do_normalize = self.config.do_normalize
542
+ if do_normalize and image.min() < 0:
543
+ warnings.warn(
544
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
545
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]",
546
+ FutureWarning,
547
+ )
548
+ do_normalize = False
549
+
550
+ if do_normalize:
551
+ image = self.normalize(image)
552
+
553
+ if self.config.do_binarize:
554
+ image = self.binarize(image)
555
+
556
+ return image
557
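A hedged sketch of calling `preprocess` directly, assuming an `image_processor` instance with the default configuration and an RGB PIL input (names and path are illustrative):

```py
import torch
from PIL import Image

img = Image.open("input.png").convert("RGB")   # hypothetical input
tensor = image_processor.preprocess(img, height=1024, width=1024)

# With do_normalize=True the result is a [1, 3, 1024, 1024] tensor with values in [-1, 1]
assert isinstance(tensor, torch.Tensor) and tensor.ndim == 4
```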
+
558
+ def postprocess(
559
+ self,
560
+ image: torch.FloatTensor,
561
+ output_type: str = "pil",
562
+ do_denormalize: Optional[List[bool]] = None,
563
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
564
+ """
565
+ Postprocess the image output from tensor to `output_type`.
566
+
567
+ Args:
568
+ image (`torch.FloatTensor`):
569
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
570
+ output_type (`str`, *optional*, defaults to `pil`):
571
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
572
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
573
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
574
+ `VaeImageProcessor` config.
575
+
576
+ Returns:
577
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
578
+ The postprocessed image.
579
+ """
580
+ if not isinstance(image, torch.Tensor):
581
+ raise ValueError(
582
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
583
+ )
584
+ if output_type not in ["latent", "pt", "np", "pil"]:
585
+ deprecation_message = (
586
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
587
+ "`pil`, `np`, `pt`, `latent`"
588
+ )
589
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
590
+ output_type = "np"
591
+
592
+ if output_type == "latent":
593
+ return image
594
+
595
+ if do_denormalize is None:
596
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
597
+
598
+ image = torch.stack(
599
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
600
+ )
601
+
602
+ if output_type == "pt":
603
+ return image
604
+
605
+ image = self.pt_to_numpy(image)
606
+
607
+ if output_type == "np":
608
+ return image
609
+
610
+ if output_type == "pil":
611
+ return self.numpy_to_pil(image)
612
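And the inverse direction, again as an illustrative sketch: `postprocess` expects the `B x C x H x W` tensor decoded by the VAE and converts it to the requested `output_type`:

```py
# `decoded` is assumed to be the VAE output, shape [B, 3, H, W], values roughly in [-1, 1]
pil_images = image_processor.postprocess(decoded, output_type="pil")  # list of PIL images
np_images = image_processor.postprocess(decoded, output_type="np")    # [B, H, W, 3] float array
```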
+
613
+ def apply_overlay(
614
+ self,
615
+ mask: PIL.Image.Image,
616
+ init_image: PIL.Image.Image,
617
+ image: PIL.Image.Image,
618
+ crop_coords: Optional[Tuple[int, int, int, int]] = None,
619
+ ) -> PIL.Image.Image:
620
+ """
621
+ Overlay the inpainted output onto the original image.
622
+ """
623
+
624
+ width, height = image.width, image.height
625
+
626
+ init_image = self.resize(init_image, width=width, height=height)
627
+ mask = self.resize(mask, width=width, height=height)
628
+
629
+ init_image_masked = PIL.Image.new("RGBa", (width, height))
630
+ init_image_masked.paste(init_image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert("L")))
631
+ init_image_masked = init_image_masked.convert("RGBA")
632
+
633
+ if crop_coords is not None:
634
+ x, y, x2, y2 = crop_coords
635
+ w = x2 - x
636
+ h = y2 - y
637
+ base_image = PIL.Image.new("RGBA", (width, height))
638
+ image = self.resize(image, height=h, width=w, resize_mode="crop")
639
+ base_image.paste(image, (x, y))
640
+ image = base_image.convert("RGB")
641
+
642
+ image = image.convert("RGBA")
643
+ image.alpha_composite(init_image_masked)
644
+ image = image.convert("RGB")
645
+
646
+ return image
647
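A rough usage sketch for `apply_overlay`, assuming `mask`, `original` and `generated` are PIL images of the same scene (all names are illustrative): where the mask is black the original pixels are kept, and where it is white the generated pixels show through.

```py
result = image_processor.apply_overlay(mask, original, generated)
result.save("composited.png")   # hypothetical output path
```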
+
648
+
649
+ class VaeImageProcessorLDM3D(VaeImageProcessor):
650
+ """
651
+ Image processor for VAE LDM3D.
652
+
653
+ Args:
654
+ do_resize (`bool`, *optional*, defaults to `True`):
655
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
656
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
657
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
658
+ resample (`str`, *optional*, defaults to `lanczos`):
659
+ Resampling filter to use when resizing the image.
660
+ do_normalize (`bool`, *optional*, defaults to `True`):
661
+ Whether to normalize the image to [-1,1].
662
+ """
663
+
664
+ config_name = CONFIG_NAME
665
+
666
+ @register_to_config
667
+ def __init__(
668
+ self,
669
+ do_resize: bool = True,
670
+ vae_scale_factor: int = 8,
671
+ resample: str = "lanczos",
672
+ do_normalize: bool = True,
673
+ ):
674
+ super().__init__()
675
+
676
+ @staticmethod
677
+ def numpy_to_pil(images: np.ndarray) -> List[PIL.Image.Image]:
678
+ """
679
+ Convert a NumPy image or a batch of images to a PIL image.
680
+ """
681
+ if images.ndim == 3:
682
+ images = images[None, ...]
683
+ images = (images * 255).round().astype("uint8")
684
+ if images.shape[-1] == 1:
685
+ # special case for grayscale (single channel) images
686
+ pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
687
+ else:
688
+ pil_images = [Image.fromarray(image[:, :, :3]) for image in images]
689
+
690
+ return pil_images
691
+
692
+ @staticmethod
693
+ def depth_pil_to_numpy(images: Union[List[PIL.Image.Image], PIL.Image.Image]) -> np.ndarray:
694
+ """
695
+ Convert a PIL image or a list of PIL images to NumPy arrays.
696
+ """
697
+ if not isinstance(images, list):
698
+ images = [images]
699
+
700
+ images = [np.array(image).astype(np.float32) / (2**16 - 1) for image in images]
701
+ images = np.stack(images, axis=0)
702
+ return images
703
+
704
+ @staticmethod
705
+ def rgblike_to_depthmap(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
706
+ """
707
+ Args:
708
+ image: RGB-like depth image
709
+
710
+ Returns: depth map
711
+
712
+ """
713
+ return image[:, :, 1] * 2**8 + image[:, :, 2]
714
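The conversion above reads a 16-bit depth value back out of the G (high byte) and B (low byte) channels. A small worked example:

```py
import numpy as np

pixel = np.array([[[0, 5, 200]]], dtype=np.uint16)   # one pixel: R=0, G=5, B=200
depth = pixel[:, :, 1] * 2**8 + pixel[:, :, 2]
print(depth)   # [[1480]] -> 5 * 256 + 200
```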
+
715
+ def numpy_to_depth(self, images: np.ndarray) -> List[PIL.Image.Image]:
716
+ """
717
+ Convert a NumPy depth image or a batch of images to a PIL image.
718
+ """
719
+ if images.ndim == 3:
720
+ images = images[None, ...]
721
+ images_depth = images[:, :, :, 3:]
722
+ if images.shape[-1] == 6:
723
+ images_depth = (images_depth * 255).round().astype("uint8")
724
+ pil_images = [
725
+ Image.fromarray(self.rgblike_to_depthmap(image_depth), mode="I;16") for image_depth in images_depth
726
+ ]
727
+ elif images.shape[-1] == 4:
728
+ images_depth = (images_depth * 65535.0).astype(np.uint16)
729
+ pil_images = [Image.fromarray(image_depth, mode="I;16") for image_depth in images_depth]
730
+ else:
731
+ raise Exception("Not supported")
732
+
733
+ return pil_images
734
+
735
+ def postprocess(
736
+ self,
737
+ image: torch.FloatTensor,
738
+ output_type: str = "pil",
739
+ do_denormalize: Optional[List[bool]] = None,
740
+ ) -> Union[PIL.Image.Image, np.ndarray, torch.FloatTensor]:
741
+ """
742
+ Postprocess the image output from tensor to `output_type`.
743
+
744
+ Args:
745
+ image (`torch.FloatTensor`):
746
+ The image input, should be a pytorch tensor with shape `B x C x H x W`.
747
+ output_type (`str`, *optional*, defaults to `pil`):
748
+ The output type of the image, can be one of `pil`, `np`, `pt`, `latent`.
749
+ do_denormalize (`List[bool]`, *optional*, defaults to `None`):
750
+ Whether to denormalize the image to [0,1]. If `None`, will use the value of `do_normalize` in the
751
+ `VaeImageProcessor` config.
752
+
753
+ Returns:
754
+ `PIL.Image.Image`, `np.ndarray` or `torch.FloatTensor`:
755
+ The postprocessed image.
756
+ """
757
+ if not isinstance(image, torch.Tensor):
758
+ raise ValueError(
759
+ f"Input for postprocessing is in incorrect format: {type(image)}. We only support pytorch tensor"
760
+ )
761
+ if output_type not in ["latent", "pt", "np", "pil"]:
762
+ deprecation_message = (
763
+ f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
764
+ "`pil`, `np`, `pt`, `latent`"
765
+ )
766
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
767
+ output_type = "np"
768
+
769
+ if do_denormalize is None:
770
+ do_denormalize = [self.config.do_normalize] * image.shape[0]
771
+
772
+ image = torch.stack(
773
+ [self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])]
774
+ )
775
+
776
+ image = self.pt_to_numpy(image)
777
+
778
+ if output_type == "np":
779
+ if image.shape[-1] == 6:
780
+ image_depth = np.stack([self.rgblike_to_depthmap(im[:, :, 3:]) for im in image], axis=0)
781
+ else:
782
+ image_depth = image[:, :, :, 3:]
783
+ return image[:, :, :, :3], image_depth
784
+
785
+ if output_type == "pil":
786
+ return self.numpy_to_pil(image), self.numpy_to_depth(image)
787
+ else:
788
+ raise Exception(f"This type {output_type} is not supported")
789
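Unlike the base class, the LDM3D `postprocess` returns an (rgb, depth) pair. An illustrative sketch, assuming `decoded` is a `[B, 6, H, W]` (RGB plus RGB-like depth) or `[B, 4, H, W]` (RGB plus raw depth) tensor from the LDM3D VAE and `ldm3d_processor` is an instance of this class:

```py
rgb_images, depth_images = ldm3d_processor.postprocess(decoded, output_type="pil")
# rgb_images:   list of PIL RGB images
# depth_images: list of 16-bit PIL depth maps (mode "I;16")
```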
+
790
+ def preprocess(
791
+ self,
792
+ rgb: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
793
+ depth: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
794
+ height: Optional[int] = None,
795
+ width: Optional[int] = None,
796
+ target_res: Optional[int] = None,
797
+ ) -> torch.Tensor:
798
+ """
799
+ Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors.
800
+ """
801
+ supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
802
+
803
+ # Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
804
+ if self.config.do_convert_grayscale and isinstance(rgb, (torch.Tensor, np.ndarray)) and rgb.ndim == 3:
805
+ raise Exception("This is not yet supported")
806
+
807
+ if isinstance(rgb, supported_formats):
808
+ rgb = [rgb]
809
+ depth = [depth]
810
+ elif not (isinstance(rgb, list) and all(isinstance(i, supported_formats) for i in rgb)):
811
+ raise ValueError(
812
+ f"Input is in incorrect format: {[type(i) for i in rgb]}. Currently, we only support {', '.join(str(x) for x in supported_formats)}"
813
+ )
814
+
815
+ if isinstance(rgb[0], PIL.Image.Image):
816
+ if self.config.do_convert_rgb:
817
+ raise Exception("This is not yet supported")
818
+ # rgb = [self.convert_to_rgb(i) for i in rgb]
819
+ # depth = [self.convert_to_depth(i) for i in depth] #TODO define convert_to_depth
820
+ if self.config.do_resize or target_res:
821
+ height, width = self.get_default_height_width(rgb[0], height, width) if not target_res else target_res
822
+ rgb = [self.resize(i, height, width) for i in rgb]
823
+ depth = [self.resize(i, height, width) for i in depth]
824
+ rgb = self.pil_to_numpy(rgb) # to np
825
+ rgb = self.numpy_to_pt(rgb) # to pt
826
+
827
+ depth = self.depth_pil_to_numpy(depth) # to np
828
+ depth = self.numpy_to_pt(depth) # to pt
829
+
830
+ elif isinstance(rgb[0], np.ndarray):
831
+ rgb = np.concatenate(rgb, axis=0) if rgb[0].ndim == 4 else np.stack(rgb, axis=0)
832
+ rgb = self.numpy_to_pt(rgb)
833
+ height, width = self.get_default_height_width(rgb, height, width)
834
+ if self.config.do_resize:
835
+ rgb = self.resize(rgb, height, width)
836
+
837
+ depth = np.concatenate(depth, axis=0) if rgb[0].ndim == 4 else np.stack(depth, axis=0)
838
+ depth = self.numpy_to_pt(depth)
839
+ height, width = self.get_default_height_width(depth, height, width)
840
+ if self.config.do_resize:
841
+ depth = self.resize(depth, height, width)
842
+
843
+ elif isinstance(rgb[0], torch.Tensor):
844
+ raise Exception("This is not yet supported")
845
+ # rgb = torch.cat(rgb, axis=0) if rgb[0].ndim == 4 else torch.stack(rgb, axis=0)
846
+
847
+ # if self.config.do_convert_grayscale and rgb.ndim == 3:
848
+ # rgb = rgb.unsqueeze(1)
849
+
850
+ # channel = rgb.shape[1]
851
+
852
+ # height, width = self.get_default_height_width(rgb, height, width)
853
+ # if self.config.do_resize:
854
+ # rgb = self.resize(rgb, height, width)
855
+
856
+ # depth = torch.cat(depth, axis=0) if depth[0].ndim == 4 else torch.stack(depth, axis=0)
857
+
858
+ # if self.config.do_convert_grayscale and depth.ndim == 3:
859
+ # depth = depth.unsqueeze(1)
860
+
861
+ # channel = depth.shape[1]
862
+ # # don't need any preprocess if the image is latents
863
+ # if depth == 4:
864
+ # return rgb, depth
865
+
866
+ # height, width = self.get_default_height_width(depth, height, width)
867
+ # if self.config.do_resize:
868
+ # depth = self.resize(depth, height, width)
869
+ # expected range [0,1], normalize to [-1,1]
870
+ do_normalize = self.config.do_normalize
871
+ if rgb.min() < 0 and do_normalize:
872
+ warnings.warn(
873
+ "Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
874
+ f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{rgb.min()},{rgb.max()}]",
875
+ FutureWarning,
876
+ )
877
+ do_normalize = False
878
+
879
+ if do_normalize:
880
+ rgb = self.normalize(rgb)
881
+ depth = self.normalize(depth)
882
+
883
+ if self.config.do_binarize:
884
+ rgb = self.binarize(rgb)
885
+ depth = self.binarize(depth)
886
+
887
+ return rgb, depth
888
+
889
+
890
+ class IPAdapterMaskProcessor(VaeImageProcessor):
891
+ """
892
+ Image processor for IP Adapter image masks.
893
+
894
+ Args:
895
+ do_resize (`bool`, *optional*, defaults to `True`):
896
+ Whether to downscale the image's (height, width) dimensions to multiples of `vae_scale_factor`.
897
+ vae_scale_factor (`int`, *optional*, defaults to `8`):
898
+ VAE scale factor. If `do_resize` is `True`, the image is automatically resized to multiples of this factor.
899
+ resample (`str`, *optional*, defaults to `lanczos`):
900
+ Resampling filter to use when resizing the image.
901
+ do_normalize (`bool`, *optional*, defaults to `False`):
902
+ Whether to normalize the image to [-1,1].
903
+ do_binarize (`bool`, *optional*, defaults to `True`):
904
+ Whether to binarize the image to 0/1.
905
+ do_convert_grayscale (`bool`, *optional*, defaults to `True`):
906
+ Whether to convert the images to grayscale format.
907
+
908
+ """
909
+
910
+ config_name = CONFIG_NAME
911
+
912
+ @register_to_config
913
+ def __init__(
914
+ self,
915
+ do_resize: bool = True,
916
+ vae_scale_factor: int = 8,
917
+ resample: str = "lanczos",
918
+ do_normalize: bool = False,
919
+ do_binarize: bool = True,
920
+ do_convert_grayscale: bool = True,
921
+ ):
922
+ super().__init__(
923
+ do_resize=do_resize,
924
+ vae_scale_factor=vae_scale_factor,
925
+ resample=resample,
926
+ do_normalize=do_normalize,
927
+ do_binarize=do_binarize,
928
+ do_convert_grayscale=do_convert_grayscale,
929
+ )
930
+
931
+ @staticmethod
932
+ def downsample(mask: torch.FloatTensor, batch_size: int, num_queries: int, value_embed_dim: int):
933
+ """
934
+ Downsamples the provided mask tensor to match the expected dimensions for scaled dot-product attention.
935
+ If the aspect ratio of the mask does not match the aspect ratio of the output image, a warning is issued.
936
+
937
+ Args:
938
+ mask (`torch.FloatTensor`):
939
+ The input mask tensor generated with `IPAdapterMaskProcessor.preprocess()`.
940
+ batch_size (`int`):
941
+ The batch size.
942
+ num_queries (`int`):
943
+ The number of queries.
944
+ value_embed_dim (`int`):
945
+ The dimensionality of the value embeddings.
946
+
947
+ Returns:
948
+ `torch.FloatTensor`:
949
+ The downsampled mask tensor.
950
+
951
+ """
952
+ o_h = mask.shape[1]
953
+ o_w = mask.shape[2]
954
+ ratio = o_w / o_h
955
+ mask_h = int(math.sqrt(num_queries / ratio))
956
+ mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0)
957
+ mask_w = num_queries // mask_h
958
+
959
+ mask_downsample = F.interpolate(mask.unsqueeze(0), size=(mask_h, mask_w), mode="bicubic").squeeze(0)
960
+
961
+ # Repeat batch_size times
962
+ if mask_downsample.shape[0] < batch_size:
963
+ mask_downsample = mask_downsample.repeat(batch_size, 1, 1)
964
+
965
+ mask_downsample = mask_downsample.view(mask_downsample.shape[0], -1)
966
+
967
+ downsampled_area = mask_h * mask_w
968
+ # If the output image and the mask do not have the same aspect ratio, tensor shapes will not match
969
+ # Pad tensor if downsampled_mask.shape[1] is smaller than num_queries
970
+ if downsampled_area < num_queries:
971
+ warnings.warn(
972
+ "The aspect ratio of the mask does not match the aspect ratio of the output image. "
973
+ "Please update your masks or adjust the output size for optimal performance.",
974
+ UserWarning,
975
+ )
976
+ mask_downsample = F.pad(mask_downsample, (0, num_queries - mask_downsample.shape[1]), value=0.0)
977
+ # Discard last embeddings if downsampled_mask.shape[1] is bigger than num_queries
978
+ if downsampled_area > num_queries:
979
+ warnings.warn(
980
+ "The aspect ratio of the mask does not match the aspect ratio of the output image. "
981
+ "Please update your masks or adjust the output size for optimal performance.",
982
+ UserWarning,
983
+ )
984
+ mask_downsample = mask_downsample[:, :num_queries]
985
+
986
+ # Repeat last dimension to match SDPA output shape
987
+ mask_downsample = mask_downsample.view(mask_downsample.shape[0], mask_downsample.shape[1], 1).repeat(
988
+ 1, 1, value_embed_dim
989
+ )
990
+
991
+ return mask_downsample
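To make the shape arithmetic in `downsample` concrete, here is the same computation outside the class with illustrative numbers:

```py
import math

num_queries = 4096             # e.g. a 64 x 64 latent attention map
o_h, o_w = 768, 512            # mask height / width
ratio = o_w / o_h

mask_h = int(math.sqrt(num_queries / ratio))                     # 78
mask_h = int(mask_h) + int((num_queries % int(mask_h)) != 0)     # 79 (round up when not divisible)
mask_w = num_queries // mask_h                                   # 51

# 79 * 51 = 4029 < 4096, so the flattened mask is zero-padded up to num_queries
```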
replace_bg/model/pipeline_controlnet_sd_xl.py ADDED
@@ -0,0 +1,1465 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import (
24
+ CLIPImageProcessor,
25
+ CLIPTextModel,
26
+ CLIPTextModelWithProjection,
27
+ CLIPTokenizer,
28
+ CLIPVisionModelWithProjection,
29
+ )
30
+
31
+ from diffusers.utils.import_utils import is_invisible_watermark_available
32
+
33
+ from .image_processor import PipelineImageInput, VaeImageProcessor
34
+ from diffusers.loaders import (
35
+ FromSingleFileMixin,
36
+ IPAdapterMixin,
37
+ StableDiffusionXLLoraLoaderMixin,
38
+ TextualInversionLoaderMixin,
39
+ )
40
+
41
+ from .controlnet import ControlNetModel
42
+ from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
43
+ from diffusers.models.attention_processor import (
44
+ AttnProcessor2_0,
45
+ LoRAAttnProcessor2_0,
46
+ LoRAXFormersAttnProcessor,
47
+ XFormersAttnProcessor,
48
+ )
49
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
50
+ from diffusers.schedulers import KarrasDiffusionSchedulers
51
+ from diffusers.utils import (
52
+ USE_PEFT_BACKEND,
53
+ deprecate,
54
+ logging,
55
+ replace_example_docstring,
56
+ scale_lora_layers,
57
+ unscale_lora_layers,
58
+ )
59
+ from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
60
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
61
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
62
+
63
+
64
+ if is_invisible_watermark_available():
65
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
66
+
67
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
68
+
69
+
70
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
71
+
72
+
73
+ EXAMPLE_DOC_STRING = """
74
+ Examples:
75
+ ```py
76
+ >>> # !pip install opencv-python transformers accelerate
77
+ >>> from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
78
+ >>> from diffusers.utils import load_image
79
+ >>> import numpy as np
80
+ >>> import torch
81
+
82
+ >>> import cv2
83
+ >>> from PIL import Image
84
+
85
+ >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
86
+ >>> negative_prompt = "low quality, bad quality, sketches"
87
+
88
+ >>> # download an image
89
+ >>> image = load_image(
90
+ ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
91
+ ... )
92
+
93
+ >>> # initialize the models and pipeline
94
+ >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
95
+ >>> controlnet = ControlNetModel.from_pretrained(
96
+ ... "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
97
+ ... )
98
+ >>> vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
99
+ >>> pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
100
+ ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
101
+ ... )
102
+ >>> pipe.enable_model_cpu_offload()
103
+
104
+ >>> # get canny image
105
+ >>> image = np.array(image)
106
+ >>> image = cv2.Canny(image, 100, 200)
107
+ >>> image = image[:, :, None]
108
+ >>> image = np.concatenate([image, image, image], axis=2)
109
+ >>> canny_image = Image.fromarray(image)
110
+
111
+ >>> # generate image
112
+ >>> image = pipe(
113
+ ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
114
+ ... ).images[0]
115
+ ```
116
+ """
117
+
118
+
119
+ class StableDiffusionXLControlNetPipeline(
120
+ DiffusionPipeline,
121
+ TextualInversionLoaderMixin,
122
+ StableDiffusionXLLoraLoaderMixin,
123
+ IPAdapterMixin,
124
+ FromSingleFileMixin,
125
+ ):
126
+ r"""
127
+ Pipeline for text-to-image generation using Stable Diffusion XL with ControlNet guidance.
128
+
129
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
130
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
131
+
132
+ The pipeline also inherits the following loading methods:
133
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
134
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
135
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
136
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
137
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
138
+
139
+ Args:
140
+ vae ([`AutoencoderKL`]):
141
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
142
+ text_encoder ([`~transformers.CLIPTextModel`]):
143
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
144
+ text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
145
+ Second frozen text-encoder
146
+ ([laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
147
+ tokenizer ([`~transformers.CLIPTokenizer`]):
148
+ A `CLIPTokenizer` to tokenize text.
149
+ tokenizer_2 ([`~transformers.CLIPTokenizer`]):
150
+ A `CLIPTokenizer` to tokenize text.
151
+ unet ([`UNet2DConditionModel`]):
152
+ A `UNet2DConditionModel` to denoise the encoded image latents.
153
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
154
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
155
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
156
+ additional conditioning.
157
+ scheduler ([`SchedulerMixin`]):
158
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
159
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
160
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
161
+ Whether the negative prompt embeddings should always be set to 0. Also see the config of
162
+ `stabilityai/stable-diffusion-xl-base-1-0`.
163
+ add_watermarker (`bool`, *optional*):
164
+ Whether to use the [invisible_watermark](https://github.com/ShieldMnt/invisible-watermark/) library to
165
+ watermark output images. If not defined, it defaults to `True` if the package is installed; otherwise no
166
+ watermarker is used.
167
+ """
168
+
169
+ # leave controlnet out on purpose because it iterates with unet
170
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
171
+ _optional_components = [
172
+ "tokenizer",
173
+ "tokenizer_2",
174
+ "text_encoder",
175
+ "text_encoder_2",
176
+ "feature_extractor",
177
+ "image_encoder",
178
+ ]
179
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
180
+
181
+ def __init__(
182
+ self,
183
+ vae: AutoencoderKL,
184
+ text_encoder: CLIPTextModel,
185
+ text_encoder_2: CLIPTextModelWithProjection,
186
+ tokenizer: CLIPTokenizer,
187
+ tokenizer_2: CLIPTokenizer,
188
+ unet: UNet2DConditionModel,
189
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
190
+ scheduler: KarrasDiffusionSchedulers,
191
+ force_zeros_for_empty_prompt: bool = True,
192
+ add_watermarker: Optional[bool] = None,
193
+ feature_extractor: CLIPImageProcessor = None,
194
+ image_encoder: CLIPVisionModelWithProjection = None,
195
+ ):
196
+ super().__init__()
197
+
198
+ if isinstance(controlnet, (list, tuple)):
199
+ controlnet = MultiControlNetModel(controlnet)
200
+
201
+ self.register_modules(
202
+ vae=vae,
203
+ text_encoder=text_encoder,
204
+ text_encoder_2=text_encoder_2,
205
+ tokenizer=tokenizer,
206
+ tokenizer_2=tokenizer_2,
207
+ unet=unet,
208
+ controlnet=controlnet,
209
+ scheduler=scheduler,
210
+ feature_extractor=feature_extractor,
211
+ image_encoder=image_encoder,
212
+ )
213
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
214
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
215
+ self.control_image_processor = VaeImageProcessor(
216
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
217
+ )
218
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
219
+
220
+ if add_watermarker:
221
+ self.watermark = StableDiffusionXLWatermarker()
222
+ else:
223
+ self.watermark = None
224
+
225
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
226
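For reference, a standard SDXL VAE config has four `block_out_channels` entries, so `vae_scale_factor` works out to 8 and both image processors snap their inputs to multiples of 8. A tiny sketch of that relationship (the channel values are the usual SDXL ones, stated here as an assumption):

```py
block_out_channels = (128, 256, 512, 512)             # typical SDXL VAE configuration
vae_scale_factor = 2 ** (len(block_out_channels) - 1)
assert vae_scale_factor == 8
```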
+
227
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
228
+ def enable_vae_slicing(self):
229
+ r"""
230
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
231
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
232
+ """
233
+ self.vae.enable_slicing()
234
+
235
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
236
+ def disable_vae_slicing(self):
237
+ r"""
238
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
239
+ computing decoding in one step.
240
+ """
241
+ self.vae.disable_slicing()
242
+
243
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
244
+ def enable_vae_tiling(self):
245
+ r"""
246
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
247
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
248
+ processing larger images.
249
+ """
250
+ self.vae.enable_tiling()
251
+
252
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
253
+ def disable_vae_tiling(self):
254
+ r"""
255
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
256
+ computing decoding in one step.
257
+ """
258
+ self.vae.disable_tiling()
259
+
260
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
261
+ def encode_prompt(
262
+ self,
263
+ prompt: str,
264
+ prompt_2: Optional[str] = None,
265
+ device: Optional[torch.device] = None,
266
+ num_images_per_prompt: int = 1,
267
+ do_classifier_free_guidance: bool = True,
268
+ negative_prompt: Optional[str] = None,
269
+ negative_prompt_2: Optional[str] = None,
270
+ prompt_embeds: Optional[torch.FloatTensor] = None,
271
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
272
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
273
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
274
+ lora_scale: Optional[float] = None,
275
+ clip_skip: Optional[int] = None,
276
+ ):
277
+ r"""
278
+ Encodes the prompt into text encoder hidden states.
279
+
280
+ Args:
281
+ prompt (`str` or `List[str]`, *optional*):
282
+ prompt to be encoded
283
+ prompt_2 (`str` or `List[str]`, *optional*):
284
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
285
+ used in both text-encoders
286
+ device: (`torch.device`):
287
+ torch device
288
+ num_images_per_prompt (`int`):
289
+ number of images that should be generated per prompt
290
+ do_classifier_free_guidance (`bool`):
291
+ whether to use classifier free guidance or not
292
+ negative_prompt (`str` or `List[str]`, *optional*):
293
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
294
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
295
+ less than `1`).
296
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
297
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
298
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
299
+ prompt_embeds (`torch.FloatTensor`, *optional*):
300
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
301
+ provided, text embeddings will be generated from `prompt` input argument.
302
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
303
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
304
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
305
+ argument.
306
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
307
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
308
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
309
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
310
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
311
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
312
+ input argument.
313
+ lora_scale (`float`, *optional*):
314
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
315
+ clip_skip (`int`, *optional*):
316
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
317
+ the output of the pre-final layer will be used for computing the prompt embeddings.
318
+ """
319
+ device = device or self._execution_device
320
+
321
+ # set lora scale so that monkey patched LoRA
322
+ # function of text encoder can correctly access it
323
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
324
+ self._lora_scale = lora_scale
325
+
326
+ # dynamically adjust the LoRA scale
327
+ if self.text_encoder is not None:
328
+ if not USE_PEFT_BACKEND:
329
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
330
+ else:
331
+ scale_lora_layers(self.text_encoder, lora_scale)
332
+
333
+ if self.text_encoder_2 is not None:
334
+ if not USE_PEFT_BACKEND:
335
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
336
+ else:
337
+ scale_lora_layers(self.text_encoder_2, lora_scale)
338
+
339
+ prompt = [prompt] if isinstance(prompt, str) else prompt
340
+
341
+ if prompt is not None:
342
+ batch_size = len(prompt)
343
+ else:
344
+ batch_size = prompt_embeds.shape[0]
345
+
346
+ # Define tokenizers and text encoders
347
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
348
+ text_encoders = (
349
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
350
+ )
351
+
352
+ if prompt_embeds is None:
353
+ prompt_2 = prompt_2 or prompt
354
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
355
+
356
+ # textual inversion: process multi-vector tokens if necessary
357
+ prompt_embeds_list = []
358
+ prompts = [prompt, prompt_2]
359
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
360
+ if isinstance(self, TextualInversionLoaderMixin):
361
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
362
+
363
+ text_inputs = tokenizer(
364
+ prompt,
365
+ padding="max_length",
366
+ max_length=tokenizer.model_max_length,
367
+ truncation=True,
368
+ return_tensors="pt",
369
+ )
370
+
371
+ text_input_ids = text_inputs.input_ids
372
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
373
+
374
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
375
+ text_input_ids, untruncated_ids
376
+ ):
377
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
378
+ logger.warning(
379
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
380
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
381
+ )
382
+
383
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
384
+
385
+ # We are always interested only in the pooled output of the final text encoder
386
+ pooled_prompt_embeds = prompt_embeds[0]
387
+ if clip_skip is None:
388
+ prompt_embeds = prompt_embeds.hidden_states[-2]
389
+ else:
390
+ # "2" because SDXL always indexes from the penultimate layer.
391
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
392
+
393
+ prompt_embeds_list.append(prompt_embeds)
394
+
395
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
396
+
397
+ # get unconditional embeddings for classifier free guidance
398
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
399
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
400
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
401
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
402
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
403
+ negative_prompt = negative_prompt or ""
404
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
405
+
406
+ # normalize str to list
407
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
408
+ negative_prompt_2 = (
409
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
410
+ )
411
+
412
+ uncond_tokens: List[str]
413
+ if prompt is not None and type(prompt) is not type(negative_prompt):
414
+ raise TypeError(
415
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
416
+ f" {type(prompt)}."
417
+ )
418
+ elif batch_size != len(negative_prompt):
419
+ raise ValueError(
420
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
421
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
422
+ " the batch size of `prompt`."
423
+ )
424
+ else:
425
+ uncond_tokens = [negative_prompt, negative_prompt_2]
426
+
427
+ negative_prompt_embeds_list = []
428
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
429
+ if isinstance(self, TextualInversionLoaderMixin):
430
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
431
+
432
+ max_length = prompt_embeds.shape[1]
433
+ uncond_input = tokenizer(
434
+ negative_prompt,
435
+ padding="max_length",
436
+ max_length=max_length,
437
+ truncation=True,
438
+ return_tensors="pt",
439
+ )
440
+
441
+ negative_prompt_embeds = text_encoder(
442
+ uncond_input.input_ids.to(device),
443
+ output_hidden_states=True,
444
+ )
445
+ # We are always interested only in the pooled output of the final text encoder
446
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
447
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
448
+
449
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
450
+
451
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
452
+
453
+ if self.text_encoder_2 is not None:
454
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
455
+ else:
456
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
457
+
458
+ bs_embed, seq_len, _ = prompt_embeds.shape
459
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
460
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
461
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
462
+
463
+ if do_classifier_free_guidance:
464
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
465
+ seq_len = negative_prompt_embeds.shape[1]
466
+
467
+ if self.text_encoder_2 is not None:
468
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
469
+ else:
470
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
471
+
472
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
473
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
474
+
475
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
476
+ bs_embed * num_images_per_prompt, -1
477
+ )
478
+ if do_classifier_free_guidance:
479
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
480
+ bs_embed * num_images_per_prompt, -1
481
+ )
482
+
483
+ if self.text_encoder is not None:
484
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
485
+ # Retrieve the original scale by scaling back the LoRA layers
486
+ unscale_lora_layers(self.text_encoder, lora_scale)
487
+
488
+ if self.text_encoder_2 is not None:
489
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
490
+ # Retrieve the original scale by scaling back the LoRA layers
491
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
492
+
493
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
494
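A hedged sketch of calling `encode_prompt` on its own (outside the normal `__call__` flow), assuming `pipe` is an already-loaded `StableDiffusionXLControlNetPipeline`:

```py
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt(
    prompt="aerial view, a futuristic research complex in a bright foggy jungle",
    negative_prompt="low quality, bad quality, sketches",
    device=pipe._execution_device,
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
)
# The four tensors can be fed back into the pipeline via `prompt_embeds=...`,
# `negative_prompt_embeds=...` and the two pooled variants.
```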
+
495
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
496
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
497
+ dtype = next(self.image_encoder.parameters()).dtype
498
+
499
+ if not isinstance(image, torch.Tensor):
500
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
501
+
502
+ image = image.to(device=device, dtype=dtype)
503
+ if output_hidden_states:
504
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
505
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
506
+ uncond_image_enc_hidden_states = self.image_encoder(
507
+ torch.zeros_like(image), output_hidden_states=True
508
+ ).hidden_states[-2]
509
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
510
+ num_images_per_prompt, dim=0
511
+ )
512
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
513
+ else:
514
+ image_embeds = self.image_encoder(image).image_embeds
515
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
516
+ uncond_image_embeds = torch.zeros_like(image_embeds)
517
+
518
+ return image_embeds, uncond_image_embeds
519
+
520
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
521
+ def prepare_ip_adapter_image_embeds(self, ip_adapter_image, device, num_images_per_prompt):
522
+ if not isinstance(ip_adapter_image, list):
523
+ ip_adapter_image = [ip_adapter_image]
524
+
525
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
526
+ raise ValueError(
527
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
528
+ )
529
+
530
+ image_embeds = []
531
+ for single_ip_adapter_image, image_proj_layer in zip(
532
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
533
+ ):
534
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
535
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
536
+ single_ip_adapter_image, device, 1, output_hidden_state
537
+ )
538
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
539
+ single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0)
540
+
541
+ if self.do_classifier_free_guidance:
542
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
543
+ single_image_embeds = single_image_embeds.to(device)
544
+
545
+ image_embeds.append(single_image_embeds)
546
+
547
+ return image_embeds
548
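An illustrative sketch of how `encode_image` and `prepare_ip_adapter_image_embeds` are used together inside `__call__` when an IP-Adapter is loaded (`ref_image` is an assumed PIL reference image; `device` and `num_images_per_prompt` are the usual call-time values):

```py
# Inside __call__, after the guidance scale has been set:
image_embeds = self.prepare_ip_adapter_image_embeds(
    ip_adapter_image=ref_image,
    device=device,
    num_images_per_prompt=num_images_per_prompt,
)
# One tensor per loaded IP-Adapter; with classifier-free guidance the negative
# (zero-image) embeddings are concatenated in front of the positive ones.
```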
+
549
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
550
+ def prepare_extra_step_kwargs(self, generator, eta):
551
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
552
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
553
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
554
+ # and should be between [0, 1]
555
+
556
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
557
+ extra_step_kwargs = {}
558
+ if accepts_eta:
559
+ extra_step_kwargs["eta"] = eta
560
+
561
+ # check if the scheduler accepts generator
562
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
563
+ if accepts_generator:
564
+ extra_step_kwargs["generator"] = generator
565
+ return extra_step_kwargs
566
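The signature-inspection trick above generalizes to any scheduler; a minimal standalone sketch of the same idea:

```py
import inspect

def build_step_kwargs(scheduler, generator=None, eta=0.0):
    # Only pass arguments that this scheduler's step() actually accepts
    params = set(inspect.signature(scheduler.step).parameters.keys())
    kwargs = {}
    if "eta" in params:        # only DDIM-style schedulers accept eta
        kwargs["eta"] = eta
    if "generator" in params:
        kwargs["generator"] = generator
    return kwargs
```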
+
567
+ def check_inputs(
568
+ self,
569
+ prompt,
570
+ prompt_2,
571
+ image,
572
+ callback_steps,
573
+ negative_prompt=None,
574
+ negative_prompt_2=None,
575
+ prompt_embeds=None,
576
+ negative_prompt_embeds=None,
577
+ pooled_prompt_embeds=None,
578
+ negative_pooled_prompt_embeds=None,
579
+ controlnet_conditioning_scale=1.0,
580
+ control_guidance_start=0.0,
581
+ control_guidance_end=1.0,
582
+ callback_on_step_end_tensor_inputs=None,
583
+ ):
584
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
585
+ raise ValueError(
586
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
587
+ f" {type(callback_steps)}."
588
+ )
589
+
590
+ if callback_on_step_end_tensor_inputs is not None and not all(
591
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
592
+ ):
593
+ raise ValueError(
594
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
595
+ )
596
+
597
+ if prompt is not None and prompt_embeds is not None:
598
+ raise ValueError(
599
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
600
+ " only forward one of the two."
601
+ )
602
+ elif prompt_2 is not None and prompt_embeds is not None:
603
+ raise ValueError(
604
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
605
+ " only forward one of the two."
606
+ )
607
+ elif prompt is None and prompt_embeds is None:
608
+ raise ValueError(
609
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
610
+ )
611
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
612
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
613
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
614
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
615
+
616
+ if negative_prompt is not None and negative_prompt_embeds is not None:
617
+ raise ValueError(
618
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
619
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
620
+ )
621
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
622
+ raise ValueError(
623
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
624
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
625
+ )
626
+
627
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
628
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
629
+ raise ValueError(
630
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
631
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
632
+ f" {negative_prompt_embeds.shape}."
633
+ )
634
+
635
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
636
+ raise ValueError(
637
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
638
+ )
639
+
640
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
641
+ raise ValueError(
642
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
643
+ )
644
+
645
+ # `prompt` needs more sophisticated handling when there are multiple
646
+ # conditionings.
647
+ if isinstance(self.controlnet, MultiControlNetModel):
648
+ if isinstance(prompt, list):
649
+ logger.warning(
650
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
651
+ " prompts. The conditionings will be fixed across the prompts."
652
+ )
653
+
654
+ # Check `image`
655
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
656
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
657
+ )
658
+ if (
659
+ isinstance(self.controlnet, ControlNetModel)
660
+ or is_compiled
661
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
662
+ ):
663
+ self.check_image(image, prompt, prompt_embeds)
664
+ elif (
665
+ isinstance(self.controlnet, MultiControlNetModel)
666
+ or is_compiled
667
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
668
+ ):
669
+ if not isinstance(image, list):
670
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
671
+
672
+ # When `image` is a nested list:
673
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
674
+ elif any(isinstance(i, list) for i in image):
675
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
676
+ elif len(image) != len(self.controlnet.nets):
677
+ raise ValueError(
678
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
679
+ )
680
+
681
+ for image_ in image:
682
+ self.check_image(image_, prompt, prompt_embeds)
683
+ else:
684
+ assert False
685
+
686
+ # Check `controlnet_conditioning_scale`
687
+ if (
688
+ isinstance(self.controlnet, ControlNetModel)
689
+ or is_compiled
690
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
691
+ ):
692
+ if not isinstance(controlnet_conditioning_scale, float):
693
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
694
+ elif (
695
+ isinstance(self.controlnet, MultiControlNetModel)
696
+ or is_compiled
697
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
698
+ ):
699
+ if isinstance(controlnet_conditioning_scale, list):
700
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
701
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
702
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
703
+ self.controlnet.nets
704
+ ):
705
+ raise ValueError(
706
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
707
+ " the same length as the number of controlnets"
708
+ )
709
+ else:
710
+ assert False
711
+
712
+ if not isinstance(control_guidance_start, (tuple, list)):
713
+ control_guidance_start = [control_guidance_start]
714
+
715
+ if not isinstance(control_guidance_end, (tuple, list)):
716
+ control_guidance_end = [control_guidance_end]
717
+
718
+ if len(control_guidance_start) != len(control_guidance_end):
719
+ raise ValueError(
720
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
721
+ )
722
+
723
+ if isinstance(self.controlnet, MultiControlNetModel):
724
+ if len(control_guidance_start) != len(self.controlnet.nets):
725
+ raise ValueError(
726
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
727
+ )
728
+
729
+ for start, end in zip(control_guidance_start, control_guidance_end):
730
+ if start >= end:
731
+ raise ValueError(
732
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
733
+ )
734
+ if start < 0.0:
735
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
736
+ if end > 1.0:
737
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
738
+
739
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
740
+ def check_image(self, image, prompt, prompt_embeds):
741
+ image_is_pil = isinstance(image, PIL.Image.Image)
742
+ image_is_tensor = isinstance(image, torch.Tensor)
743
+ image_is_np = isinstance(image, np.ndarray)
744
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
745
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
746
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
747
+
748
+ if (
749
+ not image_is_pil
750
+ and not image_is_tensor
751
+ and not image_is_np
752
+ and not image_is_pil_list
753
+ and not image_is_tensor_list
754
+ and not image_is_np_list
755
+ ):
756
+ raise TypeError(
757
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
758
+ )
759
+
760
+ if image_is_pil:
761
+ image_batch_size = 1
762
+ else:
763
+ image_batch_size = len(image)
764
+
765
+ if prompt is not None and isinstance(prompt, str):
766
+ prompt_batch_size = 1
767
+ elif prompt is not None and isinstance(prompt, list):
768
+ prompt_batch_size = len(prompt)
769
+ elif prompt_embeds is not None:
770
+ prompt_batch_size = prompt_embeds.shape[0]
771
+
772
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
773
+ raise ValueError(
774
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
775
+ )
776
+
777
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
778
+ def prepare_image(
779
+ self,
780
+ image,
781
+ width,
782
+ height,
783
+ batch_size,
784
+ num_images_per_prompt,
785
+ device,
786
+ dtype,
787
+ do_classifier_free_guidance=False,
788
+ guess_mode=False,
789
+ ):
790
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
791
+ image_batch_size = image.shape[0]
792
+
793
+ if image_batch_size == 1:
794
+ repeat_by = batch_size
795
+ else:
796
+ # image batch size is the same as prompt batch size
797
+ repeat_by = num_images_per_prompt
798
+
799
+ image = image.repeat_interleave(repeat_by, dim=0)
800
+
801
+ image = image.to(device=device, dtype=dtype)
802
+
803
+ if do_classifier_free_guidance and not guess_mode:
804
+ image = torch.cat([image] * 2)
805
+
806
+ return image
807
+
808
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
809
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
810
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
811
+ if isinstance(generator, list) and len(generator) != batch_size:
812
+ raise ValueError(
813
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
814
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
815
+ )
816
+
817
+ if latents is None:
818
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
819
+ else:
820
+ latents = latents.to(device)
821
+
822
+ # scale the initial noise by the standard deviation required by the scheduler
823
+ latents = latents * self.scheduler.init_noise_sigma
824
+ return latents
825
+
826
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
827
+ def _get_add_time_ids(
828
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
829
+ ):
830
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
831
+
832
+ passed_add_embed_dim = (
833
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
834
+ )
835
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
836
+
837
+ if expected_add_embed_dim != passed_add_embed_dim:
838
+ raise ValueError(
839
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
840
+ )
841
+
842
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
843
+ return add_time_ids
844
+
845
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
846
+ def upcast_vae(self):
847
+ dtype = self.vae.dtype
848
+ self.vae.to(dtype=torch.float32)
849
+ use_torch_2_0_or_xformers = isinstance(
850
+ self.vae.decoder.mid_block.attentions[0].processor,
851
+ (
852
+ AttnProcessor2_0,
853
+ XFormersAttnProcessor,
854
+ LoRAXFormersAttnProcessor,
855
+ LoRAAttnProcessor2_0,
856
+ ),
857
+ )
858
+ # if xformers or torch_2_0 is used attention block does not need
859
+ # to be in float32 which can save lots of memory
860
+ if use_torch_2_0_or_xformers:
861
+ self.vae.post_quant_conv.to(dtype)
862
+ self.vae.decoder.conv_in.to(dtype)
863
+ self.vae.decoder.mid_block.to(dtype)
864
+
865
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
866
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
867
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
868
+
869
+ The suffixes after the scaling factors represent the stages where they are being applied.
870
+
871
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
872
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
873
+
874
+ Args:
875
+ s1 (`float`):
876
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
877
+ mitigate "oversmoothing effect" in the enhanced denoising process.
878
+ s2 (`float`):
879
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
880
+ mitigate "oversmoothing effect" in the enhanced denoising process.
881
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
882
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
883
+ """
884
+ if not hasattr(self, "unet"):
885
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
886
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
887
+
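A minimal usage sketch for the FreeU hooks above, assuming `pipe` is an already-constructed instance of this pipeline; the scaling factors are the SDXL-oriented values suggested in the FreeU repository and are only an illustrative starting point, not something defined in this commit.

# Hypothetical example: `pipe` is assumed to exist; the factors are illustrative FreeU suggestions for SDXL.
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
# ... run pipe(...) as usual: skip features are attenuated, backbone features amplified ...
pipe.disable_freeu()  # restore the default UNet behavior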
888
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
889
+ def disable_freeu(self):
890
+ """Disables the FreeU mechanism if enabled."""
891
+ self.unet.disable_freeu()
892
+
893
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
894
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
895
+ """
896
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
897
+
898
+ Args:
899
+ w (`torch.Tensor`):
900
+ guidance scale values at which to generate embedding vectors
901
+ embedding_dim (`int`, *optional*, defaults to 512):
902
+ dimension of the embeddings to generate
903
+ dtype:
904
+ data type of the generated embeddings
905
+
906
+ Returns:
907
+ `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
908
+ """
909
+ assert len(w.shape) == 1
910
+ w = w * 1000.0
911
+
912
+ half_dim = embedding_dim // 2
913
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
914
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
915
+ emb = w.to(dtype)[:, None] * emb[None, :]
916
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
917
+ if embedding_dim % 2 == 1: # zero pad
918
+ emb = torch.nn.functional.pad(emb, (0, 1))
919
+ assert emb.shape == (w.shape[0], embedding_dim)
920
+ return emb
921
+
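For intuition, a small standalone sketch (plain PyTorch, nothing from this file) that reproduces the sinusoidal embedding computed above and checks its shape; `w` stands for the guidance-scale values that `__call__` passes in when `time_cond_proj_dim` is set (e.g. for LCM-style UNets, as noted in the "Copied from" comment).

import torch

w = torch.tensor([7.5 - 1.0, 3.0 - 1.0])  # guidance scales, already shifted by -1 as in __call__
embedding_dim = 256
half_dim = embedding_dim // 2
freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -(torch.log(torch.tensor(10000.0)) / (half_dim - 1)))
emb = (w * 1000.0)[:, None] * freqs[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
assert emb.shape == (2, 256)  # same shape as get_guidance_scale_embedding(w, embedding_dim=256)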
922
+ @property
923
+ def guidance_scale(self):
924
+ return self._guidance_scale
925
+
926
+ @property
927
+ def clip_skip(self):
928
+ return self._clip_skip
929
+
930
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
931
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
932
+ # corresponds to doing no classifier free guidance.
933
+ @property
934
+ def do_classifier_free_guidance(self):
935
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
936
+
937
+ @property
938
+ def cross_attention_kwargs(self):
939
+ return self._cross_attention_kwargs
940
+
941
+ @property
942
+ def num_timesteps(self):
943
+ return self._num_timesteps
944
+
945
+ @torch.no_grad()
946
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
947
+ def __call__(
948
+ self,
949
+ prompt: Union[str, List[str]] = None,
950
+ prompt_2: Optional[Union[str, List[str]]] = None,
951
+ image: PipelineImageInput = None,
952
+ height: Optional[int] = None,
953
+ width: Optional[int] = None,
954
+ num_inference_steps: int = 50,
955
+ guidance_scale: float = 5.0,
956
+ negative_prompt: Optional[Union[str, List[str]]] = None,
957
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
958
+ num_images_per_prompt: Optional[int] = 1,
959
+ eta: float = 0.0,
960
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
961
+ latents: Optional[torch.FloatTensor] = None,
962
+ prompt_embeds: Optional[torch.FloatTensor] = None,
963
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
964
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
965
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
966
+ ip_adapter_image: Optional[PipelineImageInput] = None,
967
+ output_type: Optional[str] = "pil",
968
+ return_dict: bool = True,
969
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
970
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
971
+ guess_mode: bool = False,
972
+ control_guidance_start: Union[float, List[float]] = 0.0,
973
+ control_guidance_end: Union[float, List[float]] = 1.0,
974
+ original_size: Tuple[int, int] = None,
975
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
976
+ target_size: Tuple[int, int] = None,
977
+ negative_original_size: Optional[Tuple[int, int]] = None,
978
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
979
+ negative_target_size: Optional[Tuple[int, int]] = None,
980
+ clip_skip: Optional[int] = None,
981
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
982
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
983
+ **kwargs,
984
+ ):
985
+ r"""
986
+ The call function to the pipeline for generation.
987
+
988
+ Args:
989
+ prompt (`str` or `List[str]`, *optional*):
990
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
991
+ prompt_2 (`str` or `List[str]`, *optional*):
992
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
993
+ used in both text-encoders.
994
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
995
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
996
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
997
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
998
+ accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
999
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
1000
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
1001
+ input to a single ControlNet.
1002
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1003
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
1004
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1005
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1006
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1007
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
1008
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1009
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1010
+ num_inference_steps (`int`, *optional*, defaults to 50):
1011
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1012
+ expense of slower inference.
1013
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1014
+ A higher guidance scale value encourages the model to generate images closely linked to the text
1015
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1016
+ negative_prompt (`str` or `List[str]`, *optional*):
1017
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1018
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1019
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1020
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
1021
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
1022
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1023
+ The number of images to generate per prompt.
1024
+ eta (`float`, *optional*, defaults to 0.0):
1025
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1026
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1027
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1028
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1029
+ generation deterministic.
1030
+ latents (`torch.FloatTensor`, *optional*):
1031
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1032
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1033
+ tensor is generated by sampling using the supplied random `generator`.
1034
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1035
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1036
+ provided, text embeddings are generated from the `prompt` input argument.
1037
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1038
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1039
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1040
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1041
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1042
+ not provided, pooled text embeddings are generated from `prompt` input argument.
1043
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1044
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
1045
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
1046
+ argument.
1047
+ ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1048
+ output_type (`str`, *optional*, defaults to `"pil"`):
1049
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1050
+ return_dict (`bool`, *optional*, defaults to `True`):
1051
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1052
+ plain tuple.
1053
+ cross_attention_kwargs (`dict`, *optional*):
1054
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1055
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1056
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1057
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
1058
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
1059
+ the corresponding scale as a list.
1060
+ guess_mode (`bool`, *optional*, defaults to `False`):
1061
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
1062
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
1063
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1064
+ The percentage of total steps at which the ControlNet starts applying.
1065
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1066
+ The percentage of total steps at which the ControlNet stops applying.
1067
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1068
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1069
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1070
+ explained in section 2.2 of
1071
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1072
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1073
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1074
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1075
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1076
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1077
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1078
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1079
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1080
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1081
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1082
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1083
+ micro-conditioning as explained in section 2.2 of
1084
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1085
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1086
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1087
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1088
+ micro-conditioning as explained in section 2.2 of
1089
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1090
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1091
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1092
+ To negatively condition the generation process based on a target image resolution. It should be the same
1093
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1094
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1095
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1096
+ clip_skip (`int`, *optional*):
1097
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1098
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1099
+ callback_on_step_end (`Callable`, *optional*):
1100
+ A function that is called at the end of each denoising step during inference. The function is called
1101
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1102
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1103
+ `callback_on_step_end_tensor_inputs`.
1104
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1105
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1106
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1107
+ `._callback_tensor_inputs` attribute of your pipeline class.
1108
+
1109
+ Examples:
1110
+
1111
+ Returns:
1112
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1113
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1114
+ otherwise a `tuple` is returned containing the output images.
1115
+ """
1116
+
1117
+ callback = kwargs.pop("callback", None)
1118
+ callback_steps = kwargs.pop("callback_steps", None)
1119
+
1120
+ if callback is not None:
1121
+ deprecate(
1122
+ "callback",
1123
+ "1.0.0",
1124
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1125
+ )
1126
+ if callback_steps is not None:
1127
+ deprecate(
1128
+ "callback_steps",
1129
+ "1.0.0",
1130
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1131
+ )
1132
+
1133
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1134
+
1135
+ # align format for control guidance
1136
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1137
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1138
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1139
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1140
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1141
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1142
+ control_guidance_start, control_guidance_end = (
1143
+ mult * [control_guidance_start],
1144
+ mult * [control_guidance_end],
1145
+ )
1146
+
1147
+ # 1. Check inputs. Raise error if not correct
1148
+ self.check_inputs(
1149
+ prompt,
1150
+ prompt_2,
1151
+ image,
1152
+ callback_steps,
1153
+ negative_prompt,
1154
+ negative_prompt_2,
1155
+ prompt_embeds,
1156
+ negative_prompt_embeds,
1157
+ pooled_prompt_embeds,
1158
+ negative_pooled_prompt_embeds,
1159
+ controlnet_conditioning_scale,
1160
+ control_guidance_start,
1161
+ control_guidance_end,
1162
+ callback_on_step_end_tensor_inputs,
1163
+ )
1164
+
1165
+ self._guidance_scale = guidance_scale
1166
+ self._clip_skip = clip_skip
1167
+ self._cross_attention_kwargs = cross_attention_kwargs
1168
+
1169
+ # 2. Define call parameters
1170
+ if prompt is not None and isinstance(prompt, str):
1171
+ batch_size = 1
1172
+ elif prompt is not None and isinstance(prompt, list):
1173
+ batch_size = len(prompt)
1174
+ else:
1175
+ batch_size = prompt_embeds.shape[0]
1176
+
1177
+ device = self._execution_device
1178
+
1179
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1180
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1181
+
1182
+ global_pool_conditions = (
1183
+ controlnet.config.global_pool_conditions
1184
+ if isinstance(controlnet, ControlNetModel)
1185
+ else controlnet.nets[0].config.global_pool_conditions
1186
+ )
1187
+ guess_mode = guess_mode or global_pool_conditions
1188
+
1189
+ # 3.1 Encode input prompt
1190
+ text_encoder_lora_scale = (
1191
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1192
+ )
1193
+ (
1194
+ prompt_embeds,
1195
+ negative_prompt_embeds,
1196
+ pooled_prompt_embeds,
1197
+ negative_pooled_prompt_embeds,
1198
+ ) = self.encode_prompt(
1199
+ prompt,
1200
+ prompt_2,
1201
+ device,
1202
+ num_images_per_prompt,
1203
+ self.do_classifier_free_guidance,
1204
+ negative_prompt,
1205
+ negative_prompt_2,
1206
+ prompt_embeds=prompt_embeds,
1207
+ negative_prompt_embeds=negative_prompt_embeds,
1208
+ pooled_prompt_embeds=pooled_prompt_embeds,
1209
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1210
+ lora_scale=text_encoder_lora_scale,
1211
+ clip_skip=self.clip_skip,
1212
+ )
1213
+
1214
+ # 3.2 Encode ip_adapter_image
1215
+ if ip_adapter_image is not None:
1216
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1217
+ ip_adapter_image, device, batch_size * num_images_per_prompt
1218
+ )
1219
+
1220
+ # 4. Prepare image
1221
+ if isinstance(controlnet, ControlNetModel):
1222
+ image = self.prepare_image(
1223
+ image=image,
1224
+ width=width,
1225
+ height=height,
1226
+ batch_size=batch_size * num_images_per_prompt,
1227
+ num_images_per_prompt=num_images_per_prompt,
1228
+ device=device,
1229
+ dtype=controlnet.dtype,
1230
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1231
+ guess_mode=guess_mode,
1232
+ )
1233
+ height, width = image.shape[-2:]
1234
+ height, width = height * self.vae_scale_factor, width * self.vae_scale_factor  # the control image is a VAE latent, so recover pixel dims from the latent dims
1235
+ elif isinstance(controlnet, MultiControlNetModel):
1236
+ images = []
1237
+
1238
+ for image_ in image:
1239
+ image_ = self.prepare_image(
1240
+ image=image_,
1241
+ width=width,
1242
+ height=height,
1243
+ batch_size=batch_size * num_images_per_prompt,
1244
+ num_images_per_prompt=num_images_per_prompt,
1245
+ device=device,
1246
+ dtype=controlnet.dtype,
1247
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1248
+ guess_mode=guess_mode,
1249
+ )
1250
+
1251
+ images.append(image_)
1252
+
1253
+ image = images
1254
+ height, width = image[0].shape[-2:]
1255
+ else:
1256
+ assert False
1257
+
1258
+ # 5. Prepare timesteps
1259
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1260
+ timesteps = self.scheduler.timesteps
1261
+ self._num_timesteps = len(timesteps)
1262
+
1263
+ # 6. Prepare latent variables
1264
+ num_channels_latents = self.unet.config.in_channels
1265
+ latents = self.prepare_latents(
1266
+ batch_size * num_images_per_prompt,
1267
+ num_channels_latents,
1268
+ height,
1269
+ width,
1270
+ prompt_embeds.dtype,
1271
+ device,
1272
+ generator,
1273
+ latents,
1274
+ )
1275
+
1276
+ # 6.5 Optionally get Guidance Scale Embedding
1277
+ timestep_cond = None
1278
+ if self.unet.config.time_cond_proj_dim is not None:
1279
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1280
+ timestep_cond = self.get_guidance_scale_embedding(
1281
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1282
+ ).to(device=device, dtype=latents.dtype)
1283
+
1284
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1285
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1286
+
1287
+ # 7.1 Create tensor stating which controlnets to keep
1288
+ controlnet_keep = []
1289
+ for i in range(len(timesteps)):
1290
+ keeps = [
1291
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1292
+ for s, e in zip(control_guidance_start, control_guidance_end)
1293
+ ]
1294
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1295
+
1296
+ # 7.2 Prepare added time ids & embeddings
1297
+ if isinstance(image, list):
1298
+ original_size = original_size or image[0].shape[-2:]
1299
+ else:
1300
+ original_size = original_size or image.shape[-2:]
1301
+ target_size = target_size or (height, width)
1302
+
1303
+ add_text_embeds = pooled_prompt_embeds
1304
+ if self.text_encoder_2 is None:
1305
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1306
+ else:
1307
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1308
+
1309
+ add_time_ids = self._get_add_time_ids(
1310
+ original_size,
1311
+ crops_coords_top_left,
1312
+ target_size,
1313
+ dtype=prompt_embeds.dtype,
1314
+ text_encoder_projection_dim=text_encoder_projection_dim,
1315
+ )
1316
+
1317
+ if negative_original_size is not None and negative_target_size is not None:
1318
+ negative_add_time_ids = self._get_add_time_ids(
1319
+ negative_original_size,
1320
+ negative_crops_coords_top_left,
1321
+ negative_target_size,
1322
+ dtype=prompt_embeds.dtype,
1323
+ text_encoder_projection_dim=text_encoder_projection_dim,
1324
+ )
1325
+ else:
1326
+ negative_add_time_ids = add_time_ids
1327
+
1328
+ if self.do_classifier_free_guidance:
1329
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1330
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1331
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1332
+
1333
+ prompt_embeds = prompt_embeds.to(device)
1334
+ add_text_embeds = add_text_embeds.to(device)
1335
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1336
+
1337
+ # 8. Denoising loop
1338
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1339
+ is_unet_compiled = is_compiled_module(self.unet)
1340
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
1341
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
1342
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1343
+ for i, t in enumerate(timesteps):
1344
+ # Relevant thread:
1345
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
1346
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
1347
+ torch._inductor.cudagraph_mark_step_begin()
1348
+ # expand the latents if we are doing classifier free guidance
1349
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1350
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1351
+
1352
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1353
+
1354
+ # controlnet(s) inference
1355
+ if guess_mode and self.do_classifier_free_guidance:
1356
+ # Infer ControlNet only for the conditional batch.
1357
+ control_model_input = latents
1358
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1359
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1360
+ controlnet_added_cond_kwargs = {
1361
+ "text_embeds": add_text_embeds.chunk(2)[1],
1362
+ "time_ids": add_time_ids.chunk(2)[1],
1363
+ }
1364
+ else:
1365
+ control_model_input = latent_model_input
1366
+ controlnet_prompt_embeds = prompt_embeds
1367
+ controlnet_added_cond_kwargs = added_cond_kwargs
1368
+
1369
+ if isinstance(controlnet_keep[i], list):
1370
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1371
+ else:
1372
+ controlnet_cond_scale = controlnet_conditioning_scale
1373
+ if isinstance(controlnet_cond_scale, list):
1374
+ controlnet_cond_scale = controlnet_cond_scale[0]
1375
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1376
+
1377
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1378
+ control_model_input,
1379
+ t,
1380
+ encoder_hidden_states=controlnet_prompt_embeds,
1381
+ controlnet_cond=image,
1382
+ conditioning_scale=cond_scale,
1383
+ guess_mode=guess_mode,
1384
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1385
+ return_dict=False,
1386
+ )
1387
+
1388
+ if guess_mode and self.do_classifier_free_guidance:
1389
+ # ControlNet was inferred only for the conditional batch.
1390
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1391
+ # add 0 to the unconditional batch to keep it unchanged.
1392
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1393
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1394
+
1395
+ if ip_adapter_image is not None:
1396
+ added_cond_kwargs["image_embeds"] = image_embeds
1397
+
1398
+ # predict the noise residual
1399
+ noise_pred = self.unet(
1400
+ latent_model_input,
1401
+ t,
1402
+ encoder_hidden_states=prompt_embeds,
1403
+ timestep_cond=timestep_cond,
1404
+ cross_attention_kwargs=self.cross_attention_kwargs,
1405
+ down_block_additional_residuals=down_block_res_samples,
1406
+ mid_block_additional_residual=mid_block_res_sample,
1407
+ added_cond_kwargs=added_cond_kwargs,
1408
+ return_dict=False,
1409
+ )[0]
1410
+
1411
+ # perform guidance
1412
+ if self.do_classifier_free_guidance:
1413
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1414
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1415
+
1416
+ # compute the previous noisy sample x_t -> x_t-1
1417
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1418
+
1419
+ if callback_on_step_end is not None:
1420
+ callback_kwargs = {}
1421
+ for k in callback_on_step_end_tensor_inputs:
1422
+ callback_kwargs[k] = locals()[k]
1423
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1424
+
1425
+ latents = callback_outputs.pop("latents", latents)
1426
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1427
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1428
+
1429
+ # call the callback, if provided
1430
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1431
+ progress_bar.update()
1432
+ if callback is not None and i % callback_steps == 0:
1433
+ step_idx = i // getattr(self.scheduler, "order", 1)
1434
+ callback(step_idx, t, latents)
1435
+
1436
+ if not output_type == "latent":
1437
+ # make sure the VAE is in float32 mode, as it overflows in float16
1438
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1439
+
1440
+ if needs_upcasting:
1441
+ self.upcast_vae()
1442
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1443
+
1444
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1445
+
1446
+ # cast back to fp16 if needed
1447
+ if needs_upcasting:
1448
+ self.vae.to(dtype=torch.float16)
1449
+ else:
1450
+ image = latents
1451
+
1452
+ if not output_type == "latent":
1453
+ # apply watermark if available
1454
+ if self.watermark is not None:
1455
+ image = self.watermark.apply_watermark(image)
1456
+
1457
+ image = self.image_processor.postprocess(image, output_type=output_type)
1458
+
1459
+ # Offload all models
1460
+ self.maybe_free_model_hooks()
1461
+
1462
+ if not return_dict:
1463
+ return (image,)
1464
+
1465
+ return StableDiffusionXLPipelineOutput(images=image)
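Note that, unlike the stock SDXL ControlNet pipeline, this variant receives its ControlNet condition already in VAE latent space (a 4-channel latent of the background-masked image concatenated with a 1-channel downsampled foreground mask, built by `get_control_image_tensor` in `replace_bg/utilities.py` below), which is why the denoising setup recovers `height`/`width` by multiplying the condition's spatial size by `vae_scale_factor`. A hedged invocation sketch, assuming `pipe` is an instance of this pipeline with its ControlNet loaded and `control_tensor` was produced by that helper:

# Hypothetical usage; prompt, step count and scales are illustrative values only.
gen_image = pipe(
    prompt="a product photo on a rustic wooden table, natural light",
    negative_prompt="blurry, low quality",
    image=control_tensor,  # 5-channel condition: 4 VAE latent channels + 1 mask, at 1/vae_scale_factor resolution
    num_inference_steps=30,
    guidance_scale=5.0,
    controlnet_conditioning_scale=1.0,
).images[0]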
replace_bg/utilities.py ADDED
@@ -0,0 +1,52 @@
1
+
2
+ import torch
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+ def resize_image(image: Image.Image) -> Image.Image:
7
+ pixel_number = 1024*1024
8
+ granularity_val = 64
9
+ ratio = image.size[0] / image.size[1]
10
+ width = int((pixel_number * ratio) ** 0.5)
11
+ width = width - (width % granularity_val)
12
+ height = int(pixel_number / width)
13
+ height = height - (height % granularity_val)
14
+ return image.resize((width, height))
15
+
16
+ def get_masked_background_image(image: Image.Image, image_mask: Image.Image) -> tuple:
17
+ image_mask_pil = image_mask.resize(image.size) # fg is white
18
+ image = np.array(image.convert("RGB")).transpose(2, 0, 1).astype(np.float32) / 255.0
19
+ image_mask = np.array(image_mask_pil.convert("L")).astype(np.float32) / 255.0
20
+ image[:,image_mask < 0.5] = 0 # mask background
21
+ return image, image_mask
22
+
23
+ def get_control_image_tensor(vae, image: Image.Image, mask: Image.Image) -> torch.Tensor:
24
+ masked_image, image_mask = get_masked_background_image(image, mask)
25
+ masked_image_tensor = torch.from_numpy(masked_image)
26
+ masked_image_tensor = (masked_image_tensor - 0.5) / 0.5 # normalize for vae
27
+ masked_image_tensor = masked_image_tensor.unsqueeze(0).to(device="cuda:0")
28
+ # encode the image to get the control latents
29
+ control_latents = vae.encode(
30
+ masked_image_tensor[:, :3, :, :].to(vae.dtype)
31
+ ).latent_dist.sample()
32
+ control_latents = control_latents * vae.config.scaling_factor
33
+
34
+ mask_tensor = torch.tensor(image_mask, dtype=torch.float32)[None, None, ...].to(device="cuda:0")
35
+ mask_tensor = torch.where(mask_tensor > 0.5, 1.0, 0) # binarize the mask
36
+ mask_resized = torch.nn.functional.interpolate(mask_tensor, size=(control_latents.shape[2], control_latents.shape[3]), mode='nearest')
37
+ control_tensor = torch.cat([control_latents, mask_resized], dim=1)
38
+ return control_tensor
39
+
40
+ def remove_bg_from_image(image_path: str) -> Image.Image:
41
+ from transformers import pipeline
42
+ pipe = pipeline("image-segmentation", model="briaai/RMBG-1.4", trust_remote_code=True)
43
+ mask = pipe(image_path, return_mask=True)  # outputs a PIL mask
44
+ return mask
45
+
46
+ def paste_fg_over_image(gen_image: Image.Image, orig_image: Image.Image, fg_mask: Image.Image) -> Image.Image:
47
+ fg_mask = fg_mask.convert("L")
48
+ fg_mask = fg_mask.resize(orig_image.size, Image.NEAREST)
49
+ gen_image = gen_image.convert("RGBA")
50
+ orig_image = orig_image.convert("RGBA")
51
+ gen_image.paste(orig_image, (0, 0), fg_mask)
52
+ return gen_image.convert("RGB")
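Putting the helpers together, a hedged end-to-end sketch of the background-replacement flow they imply; the construction of `pipe` (pipeline class, checkpoint IDs, device placement) is not part of this commit and is assumed here.

from PIL import Image

# Assumption: `pipe` is the SDXL ControlNet pipeline from this commit, on CUDA, with its VAE reachable as pipe.vae.
orig_image = resize_image(Image.open("product.jpg"))        # snap to 64-px multiples, ~1 megapixel
fg_mask = remove_bg_from_image("product.jpg")               # PIL mask, foreground in white
control_tensor = get_control_image_tensor(pipe.vae, orig_image, fg_mask)  # (1, 5, h, w) latent + mask

gen_image = pipe(
    prompt="on a marble kitchen counter, soft morning light",
    image=control_tensor,
    num_inference_steps=30,
).images[0]

# keep the original foreground pixels and replace only the background
result = paste_fg_over_image(gen_image, orig_image, fg_mask)
result.save("replaced_background.png")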