diffusers 0.29.0__py3-none-any.whl → 0.29.1__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py

@@ -205,6 +205,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         self,
         prompt: Union[str, List[str]] = None,
         num_images_per_prompt: int = 1,
+        max_sequence_length: int = 256,
         device: Optional[torch.device] = None,
         dtype: Optional[torch.dtype] = None,
     ):
@@ -216,7 +217,11 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
 
         if self.text_encoder_3 is None:
             return torch.zeros(
-                (batch_size, self.tokenizer_max_length, self.transformer.config.joint_attention_dim),
+                (
+                    batch_size * num_images_per_prompt,
+                    self.tokenizer_max_length,
+                    self.transformer.config.joint_attention_dim,
+                ),
                 device=device,
                 dtype=dtype,
             )
@@ -224,7 +229,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         text_inputs = self.tokenizer_3(
             prompt,
             padding="max_length",
-            max_length=self.tokenizer_max_length,
+            max_length=max_sequence_length,
             truncation=True,
             add_special_tokens=True,
             return_tensors="pt",
@@ -235,8 +240,8 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
             removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
             logger.warning(
-                "The following part of your input was truncated because CLIP can only handle sequences up to"
-                f" {self.tokenizer_max_length} tokens: {removed_text}"
+                "The following part of your input was truncated because `max_sequence_length` is set to "
+                f" {max_sequence_length} tokens: {removed_text}"
             )
 
         prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
@@ -323,6 +328,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         clip_skip: Optional[int] = None,
+        max_sequence_length: int = 256,
     ):
         r"""
 
@@ -403,6 +409,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
             t5_prompt_embed = self._get_t5_prompt_embeds(
                 prompt=prompt_3,
                 num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
                 device=device,
             )
 
@@ -456,7 +463,10 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
             negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
 
             t5_negative_prompt_embed = self._get_t5_prompt_embeds(
-                prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, device=device
+                prompt=negative_prompt_3,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
             )
 
             negative_clip_prompt_embeds = torch.nn.functional.pad(
@@ -486,6 +496,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         pooled_prompt_embeds=None,
         negative_pooled_prompt_embeds=None,
         callback_on_step_end_tensor_inputs=None,
+        max_sequence_length=None,
     ):
         if height % 8 != 0 or width % 8 != 0:
             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
@@ -557,6 +568,9 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
                 "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
             )
 
+        if max_sequence_length is not None and max_sequence_length > 512:
+            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
     def prepare_latents(
         self,
         batch_size,
@@ -643,6 +657,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
         clip_skip: Optional[int] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 256,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -726,6 +741,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                 `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
 
         Examples:
 
@@ -753,6 +769,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
             pooled_prompt_embeds=pooled_prompt_embeds,
             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
             callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
         )
 
         self._guidance_scale = guidance_scale
@@ -790,6 +807,7 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
             device=device,
             clip_skip=self.clip_skip,
             num_images_per_prompt=num_images_per_prompt,
+            max_sequence_length=max_sequence_length,
         )
 
         if self.do_classifier_free_guidance:
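
Taken together, these hunks expose the T5 prompt length as a user-facing knob: `max_sequence_length` now flows from `__call__` through `check_inputs` and `encode_prompt` down to `_get_t5_prompt_embeds`, instead of the length being hard-wired to `tokenizer_max_length`. A minimal usage sketch against the 0.29.1 API; the checkpoint id, prompt text, and output path below are placeholders, not part of this diff:

import torch
from diffusers import StableDiffusion3Pipeline

# Placeholder checkpoint id; any SD3 checkpoint with a T5 text encoder would do.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")

# max_sequence_length caps how many T5 tokens of the prompt are encoded
# (default 256); check_inputs rejects anything above 512.
image = pipe(
    prompt="a very long, highly detailed prompt ...",
    num_inference_steps=28,
    guidance_scale=7.0,
    max_sequence_length=512,
).images[0]
image.save("sd3_long_prompt.png")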

diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py

@@ -220,6 +220,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         self,
         prompt: Union[str, List[str]] = None,
         num_images_per_prompt: int = 1,
+        max_sequence_length: int = 256,
         device: Optional[torch.device] = None,
         dtype: Optional[torch.dtype] = None,
     ):
@@ -231,7 +232,11 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
 
         if self.text_encoder_3 is None:
             return torch.zeros(
-                (batch_size, self.tokenizer_max_length, self.transformer.config.joint_attention_dim),
+                (
+                    batch_size * num_images_per_prompt,
+                    self.tokenizer_max_length,
+                    self.transformer.config.joint_attention_dim,
+                ),
                 device=device,
                 dtype=dtype,
             )
@@ -239,7 +244,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         text_inputs = self.tokenizer_3(
             prompt,
             padding="max_length",
-            max_length=self.tokenizer_max_length,
+            max_length=max_sequence_length,
             truncation=True,
             add_special_tokens=True,
             return_tensors="pt",
@@ -250,8 +255,8 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
             removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
             logger.warning(
-                "The following part of your input was truncated because CLIP can only handle sequences up to"
-                f" {self.tokenizer_max_length} tokens: {removed_text}"
+                "The following part of your input was truncated because `max_sequence_length` is set to "
+                f" {max_sequence_length} tokens: {removed_text}"
             )
 
         prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
@@ -340,6 +345,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         clip_skip: Optional[int] = None,
+        max_sequence_length: int = 256,
     ):
         r"""
 
@@ -420,6 +426,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
             t5_prompt_embed = self._get_t5_prompt_embeds(
                 prompt=prompt_3,
                 num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
                 device=device,
             )
 
@@ -473,7 +480,10 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
             negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
 
             t5_negative_prompt_embed = self._get_t5_prompt_embeds(
-                prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, device=device
+                prompt=negative_prompt_3,
+                num_images_per_prompt=num_images_per_prompt,
+                max_sequence_length=max_sequence_length,
+                device=device,
             )
 
             negative_clip_prompt_embeds = torch.nn.functional.pad(
@@ -502,6 +512,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         pooled_prompt_embeds=None,
         negative_pooled_prompt_embeds=None,
         callback_on_step_end_tensor_inputs=None,
+        max_sequence_length=None,
     ):
         if strength < 0 or strength > 1:
             raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
@@ -573,6 +584,9 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
                 "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
             )
 
+        if max_sequence_length is not None and max_sequence_length > 512:
+            raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
     def get_timesteps(self, num_inference_steps, strength, device):
         # get the original timestep using init_timestep
         init_timestep = min(num_inference_steps * strength, num_inference_steps)
@@ -686,6 +700,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
         clip_skip: Optional[int] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 256,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -765,6 +780,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                 `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
 
         Examples:
 
@@ -788,6 +804,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
             pooled_prompt_embeds=pooled_prompt_embeds,
             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
             callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
        )
 
         self._guidance_scale = guidance_scale
@@ -824,6 +841,7 @@ class StableDiffusion3Img2ImgPipeline(DiffusionPipeline):
             device=device,
             clip_skip=self.clip_skip,
             num_images_per_prompt=num_images_per_prompt,
+            max_sequence_length=max_sequence_length,
         )
 
         if self.do_classifier_free_guidance:
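
The image-to-image pipeline receives the same change, so the knob behaves identically there; in `check_inputs` it simply sits next to the existing `strength` validation. A short sketch, again with a placeholder checkpoint id, image URL, and prompt, showing both the new 512-token cap and a valid call:

import torch
from diffusers import StableDiffusion3Img2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusion3Img2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("https://example.com/input.png")  # placeholder URL

try:
    # Values above 512 are rejected by check_inputs before any encoding happens.
    pipe(prompt="...", image=init_image, strength=0.6, max_sequence_length=1024)
except ValueError as err:
    print(err)  # "`max_sequence_length` cannot be greater than 512 but is 1024"

image = pipe(
    prompt="a long prompt ...", image=init_image, strength=0.6, max_sequence_length=512
).images[0]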

diffusers/utils/dummy_pt_objects.py

@@ -242,6 +242,36 @@ class PriorTransformer(metaclass=DummyObject):
         requires_backends(cls, ["torch"])
 
 
+class SD3ControlNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class SD3MultiControlNetModel(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 class SD3Transformer2DModel(metaclass=DummyObject):
     _backends = ["torch"]
 

diffusers/utils/dummy_torch_and_transformers_objects.py

@@ -902,6 +902,21 @@ class StableCascadePriorPipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch", "transformers"])
 
 
+class StableDiffusion3ControlNetPipeline(metaclass=DummyObject):
+    _backends = ["torch", "transformers"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch", "transformers"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch", "transformers"])
+
+
 class StableDiffusion3Img2ImgPipeline(metaclass=DummyObject):
     _backends = ["torch", "transformers"]
 
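
For context, these additions follow the dummy-object pattern diffusers uses for optional backends: the new SD3 ControlNet names stay importable from the top-level package even when torch (or transformers) is missing, and only fail with a clear message when actually used. A minimal sketch of how that surfaces, assuming an environment without torch installed; the checkpoint id is a placeholder:

# Without torch, SD3ControlNetModel resolves to the dummy class above,
# so the import itself still succeeds.
from diffusers import SD3ControlNetModel

try:
    SD3ControlNetModel.from_pretrained("placeholder/sd3-controlnet")
except ImportError as err:
    # requires_backends() explains that PyTorch is needed for this object.
    print(err)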

diffusers-0.29.0.dist-info/METADATA → diffusers-0.29.1.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: diffusers
-Version: 0.29.0
+Version: 0.29.1
 Summary: State-of-the-art diffusion in PyTorch and JAX.
 Home-page: https://github.com/huggingface/diffusers
 Author: The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)
@@ -23,81 +23,81 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: importlib-metadata
 Requires-Dist: filelock
-Requires-Dist: huggingface-hub (>=0.23.2)
+Requires-Dist: huggingface-hub >=0.23.2
 Requires-Dist: numpy
-Requires-Dist: regex (!=2019.12.17)
+Requires-Dist: regex !=2019.12.17
 Requires-Dist: requests
-Requires-Dist: safetensors (>=0.3.1)
+Requires-Dist: safetensors >=0.3.1
 Requires-Dist: Pillow
 Provides-Extra: dev
-Requires-Dist: urllib3 (<=2.0.0) ; extra == 'dev'
-Requires-Dist: isort (>=5.5.4) ; extra == 'dev'
-Requires-Dist: ruff (==0.1.5) ; extra == 'dev'
-Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'dev'
-Requires-Dist: compel (==0.1.8) ; extra == 'dev'
-Requires-Dist: GitPython (<3.1.19) ; extra == 'dev'
+Requires-Dist: urllib3 <=2.0.0 ; extra == 'dev'
+Requires-Dist: isort >=5.5.4 ; extra == 'dev'
+Requires-Dist: ruff ==0.1.5 ; extra == 'dev'
+Requires-Dist: hf-doc-builder >=0.3.0 ; extra == 'dev'
+Requires-Dist: compel ==0.1.8 ; extra == 'dev'
+Requires-Dist: GitPython <3.1.19 ; extra == 'dev'
 Requires-Dist: datasets ; extra == 'dev'
 Requires-Dist: Jinja2 ; extra == 'dev'
-Requires-Dist: invisible-watermark (>=0.2.0) ; extra == 'dev'
-Requires-Dist: k-diffusion (>=0.0.12) ; extra == 'dev'
+Requires-Dist: invisible-watermark >=0.2.0 ; extra == 'dev'
+Requires-Dist: k-diffusion >=0.0.12 ; extra == 'dev'
 Requires-Dist: librosa ; extra == 'dev'
 Requires-Dist: parameterized ; extra == 'dev'
 Requires-Dist: pytest ; extra == 'dev'
 Requires-Dist: pytest-timeout ; extra == 'dev'
 Requires-Dist: pytest-xdist ; extra == 'dev'
-Requires-Dist: requests-mock (==1.10.0) ; extra == 'dev'
-Requires-Dist: safetensors (>=0.3.1) ; extra == 'dev'
-Requires-Dist: sentencepiece (!=0.1.92,>=0.1.91) ; extra == 'dev'
+Requires-Dist: requests-mock ==1.10.0 ; extra == 'dev'
+Requires-Dist: safetensors >=0.3.1 ; extra == 'dev'
+Requires-Dist: sentencepiece !=0.1.92,>=0.1.91 ; extra == 'dev'
 Requires-Dist: scipy ; extra == 'dev'
 Requires-Dist: torchvision ; extra == 'dev'
-Requires-Dist: transformers (>=4.25.1) ; extra == 'dev'
-Requires-Dist: accelerate (>=0.29.3) ; extra == 'dev'
-Requires-Dist: protobuf (<4,>=3.20.3) ; extra == 'dev'
+Requires-Dist: transformers >=4.25.1 ; extra == 'dev'
+Requires-Dist: accelerate >=0.29.3 ; extra == 'dev'
+Requires-Dist: protobuf <4,>=3.20.3 ; extra == 'dev'
 Requires-Dist: tensorboard ; extra == 'dev'
-Requires-Dist: peft (>=0.6.0) ; extra == 'dev'
-Requires-Dist: torch (>=1.4) ; extra == 'dev'
-Requires-Dist: jax (>=0.4.1) ; extra == 'dev'
-Requires-Dist: jaxlib (>=0.4.1) ; extra == 'dev'
-Requires-Dist: flax (>=0.4.1) ; extra == 'dev'
+Requires-Dist: peft >=0.6.0 ; extra == 'dev'
+Requires-Dist: torch >=1.4 ; extra == 'dev'
+Requires-Dist: jax >=0.4.1 ; extra == 'dev'
+Requires-Dist: jaxlib >=0.4.1 ; extra == 'dev'
+Requires-Dist: flax >=0.4.1 ; extra == 'dev'
 Provides-Extra: docs
-Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'docs'
+Requires-Dist: hf-doc-builder >=0.3.0 ; extra == 'docs'
 Provides-Extra: flax
-Requires-Dist: jax (>=0.4.1) ; extra == 'flax'
-Requires-Dist: jaxlib (>=0.4.1) ; extra == 'flax'
-Requires-Dist: flax (>=0.4.1) ; extra == 'flax'
+Requires-Dist: jax >=0.4.1 ; extra == 'flax'
+Requires-Dist: jaxlib >=0.4.1 ; extra == 'flax'
+Requires-Dist: flax >=0.4.1 ; extra == 'flax'
 Provides-Extra: quality
-Requires-Dist: urllib3 (<=2.0.0) ; extra == 'quality'
-Requires-Dist: isort (>=5.5.4) ; extra == 'quality'
-Requires-Dist: ruff (==0.1.5) ; extra == 'quality'
-Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'quality'
+Requires-Dist: urllib3 <=2.0.0 ; extra == 'quality'
+Requires-Dist: isort >=5.5.4 ; extra == 'quality'
+Requires-Dist: ruff ==0.1.5 ; extra == 'quality'
+Requires-Dist: hf-doc-builder >=0.3.0 ; extra == 'quality'
 Provides-Extra: test
-Requires-Dist: compel (==0.1.8) ; extra == 'test'
-Requires-Dist: GitPython (<3.1.19) ; extra == 'test'
+Requires-Dist: compel ==0.1.8 ; extra == 'test'
+Requires-Dist: GitPython <3.1.19 ; extra == 'test'
 Requires-Dist: datasets ; extra == 'test'
 Requires-Dist: Jinja2 ; extra == 'test'
-Requires-Dist: invisible-watermark (>=0.2.0) ; extra == 'test'
-Requires-Dist: k-diffusion (>=0.0.12) ; extra == 'test'
+Requires-Dist: invisible-watermark >=0.2.0 ; extra == 'test'
+Requires-Dist: k-diffusion >=0.0.12 ; extra == 'test'
 Requires-Dist: librosa ; extra == 'test'
 Requires-Dist: parameterized ; extra == 'test'
 Requires-Dist: pytest ; extra == 'test'
 Requires-Dist: pytest-timeout ; extra == 'test'
 Requires-Dist: pytest-xdist ; extra == 'test'
-Requires-Dist: requests-mock (==1.10.0) ; extra == 'test'
-Requires-Dist: safetensors (>=0.3.1) ; extra == 'test'
-Requires-Dist: sentencepiece (!=0.1.92,>=0.1.91) ; extra == 'test'
+Requires-Dist: requests-mock ==1.10.0 ; extra == 'test'
+Requires-Dist: safetensors >=0.3.1 ; extra == 'test'
+Requires-Dist: sentencepiece !=0.1.92,>=0.1.91 ; extra == 'test'
 Requires-Dist: scipy ; extra == 'test'
 Requires-Dist: torchvision ; extra == 'test'
-Requires-Dist: transformers (>=4.25.1) ; extra == 'test'
+Requires-Dist: transformers >=4.25.1 ; extra == 'test'
 Provides-Extra: torch
-Requires-Dist: torch (>=1.4) ; extra == 'torch'
-Requires-Dist: accelerate (>=0.29.3) ; extra == 'torch'
+Requires-Dist: torch >=1.4 ; extra == 'torch'
+Requires-Dist: accelerate >=0.29.3 ; extra == 'torch'
 Provides-Extra: training
-Requires-Dist: accelerate (>=0.29.3) ; extra == 'training'
+Requires-Dist: accelerate >=0.29.3 ; extra == 'training'
 Requires-Dist: datasets ; extra == 'training'
-Requires-Dist: protobuf (<4,>=3.20.3) ; extra == 'training'
+Requires-Dist: protobuf <4,>=3.20.3 ; extra == 'training'
 Requires-Dist: tensorboard ; extra == 'training'
 Requires-Dist: Jinja2 ; extra == 'training'
-Requires-Dist: peft (>=0.6.0) ; extra == 'training'
+Requires-Dist: peft >=0.6.0 ; extra == 'training'
 
 <!---
 Copyright 2022 - The HuggingFace Team. All rights reserved.

diffusers-0.29.0.dist-info/RECORD → diffusers-0.29.1.dist-info/RECORD

@@ -1,4 +1,4 @@
-diffusers/__init__.py,sha256=tmF_YOQuPHpImAexR_2QhNgRyNzkDDvHb7ykeXa8xvA,29965
+diffusers/__init__.py,sha256=9B9Z0h3ACW7GEzdn9XlzBwUljulUoiunbmCGz6mNSl0,30205
 diffusers/callbacks.py,sha256=m8ariuJC-WaPHMZn1zUyXG8hlAvaOvEW_6YWdKo--eo,6717
 diffusers/configuration_utils.py,sha256=6t5iVg-yYJKV7ZZKpXnwYMK1FcJ3X09KBKoInsDy27s,32779
 diffusers/dependency_versions_check.py,sha256=J_ZAEhVN6uLWAOUZCJrcGJ7PYxUek4f_nwGTFM7LTk8,1271
@@ -22,14 +22,14 @@ diffusers/loaders/ip_adapter.py,sha256=2CGSdTC86QXYMDuaZ63DlX4FdRYV1-m2Epi2I7Yjn
 diffusers/loaders/lora.py,sha256=XoQl7ewYLPxsLP4YaDiT3-GxWcf1emSUqB-YuYgrdWg,79918
 diffusers/loaders/lora_conversion_utils.py,sha256=BMwKgSMRywPEp6UiooCdO26EjyrAsd0mVYwkCER5Km8,14707
 diffusers/loaders/peft.py,sha256=hGCGWkGAwarQ0je_LRznpSypwlBpQU3Lx86FtQQ8cNs,8389
-diffusers/loaders/single_file.py,sha256=bW9uur_aMTFhIK_g4p0ZCg6tktCoLHM7B3HxFW1avZ8,24247
-diffusers/loaders/single_file_model.py,sha256=FHndgABJI9RlLugokFb5jtzIQrzxho7Vl-gFzsxhQR8,13711
-diffusers/loaders/single_file_utils.py,sha256=VS7tD5OdXWec0n2oFu_eA0MJSscDIjtH6NmaJvoy-kI,74764
+diffusers/loaders/single_file.py,sha256=7CamkAdRtqceCWFwf9KIOyPqL7oRGEKbHC_JDd1doIs,24685
+diffusers/loaders/single_file_model.py,sha256=aThTJCoeoYF48WMoubwWaYjUP-6PwzfgWjL2v7Y4dLQ,13720
+diffusers/loaders/single_file_utils.py,sha256=hDJvuhEIXJbX55MY-z-ZltYIqEbi7bCX-TpDOkoUZYE,75065
 diffusers/loaders/textual_inversion.py,sha256=HU8-1SR03UpkQXEQlhJBB0Gxbnlf7njXRh6KjVt3LFo,26999
 diffusers/loaders/unet.py,sha256=gORAhdw47AYlnW8Liw78MNxhdpUHZn1GbfPZ2bCFqY0,50535
 diffusers/loaders/unet_loader_utils.py,sha256=9IHd_RlKqMstSO8G7btUdL1-Y3-fGX7Kbc4frEbRVVM,6220
 diffusers/loaders/utils.py,sha256=IgI-rwNZ-xRx_jIgp61xHkeLAvqm3FSpJ674s5LzE_k,2423
-diffusers/models/__init__.py,sha256=o_sfSznTIdMKfxKLWXkf-jVgvsDCLDq0q-_MNisIgdg,4966
+diffusers/models/__init__.py,sha256=Nm1fWa_GzvZ4u4aIm5pcdbdRBjZQqBXawFVNBCJAw54,5138
 diffusers/models/activations.py,sha256=7gly0cF1lZ7wX_p0w7bk4ja8EAjcqDYnAURQ4-s-f0M,5149
 diffusers/models/adapter.py,sha256=XuVoUbhLkPEOeClqYiTm8KGMQrXgn2WZU3I73-jkcew,24723
 diffusers/models/attention.py,sha256=FCs8wO5CUEeYAO7aV0gx2IR8TTMcovIUCLDjJIckW6g,33375
@@ -37,6 +37,7 @@ diffusers/models/attention_flax.py,sha256=Ju2KJCqsx_LIiz0i1pBcek7RMKTmVOIF4SvEcO
 diffusers/models/attention_processor.py,sha256=4ZdbCIulzoxPGfbVYtlo1hL06uuehihl0K-nZGxHFl0,129950
 diffusers/models/controlnet.py,sha256=64prFwCNWkvQTUS94N-YkIZoTdUpSlpBdU4vYeOp9no,43276
 diffusers/models/controlnet_flax.py,sha256=_UuB-tNxQ9eR8v3dqDhF2Mz-6thIdEI6BlY8BpWpkvU,16710
+diffusers/models/controlnet_sd3.py,sha256=0w3XoUbb39l_2GnMpJFLl1ua1GZmRfccjjaf2N0H7EY,17953
 diffusers/models/controlnet_xs.py,sha256=sgFVAvuXSntq_t_05nwLW2Qagwlg-iXBHeytpP7HZf0,85297
 diffusers/models/downsampling.py,sha256=INecPKokYAm-z_l5n9r1cE3a2g1f8OtmBqLOZQBjp2w,12372
 diffusers/models/embeddings.py,sha256=QhFbh-045Sc2arm-d3q6FaEwGEiEYGwgE_56z9PYUzM,49643
@@ -47,7 +48,7 @@ diffusers/models/modeling_flax_pytorch_utils.py,sha256=h8KonTFgb_-4RnESXhJGeuW_b
 diffusers/models/modeling_flax_utils.py,sha256=HL6sB4vubPny4COMuKbuGrcznoOfxrxS9BrLdKF4FSs,27295
 diffusers/models/modeling_outputs.py,sha256=XH3sJO34MRW6UuWqqKo05mVqxGSBFRazpap_-YLwO2I,1042
 diffusers/models/modeling_pytorch_flax_utils.py,sha256=sEf_jVR2nF0_derGLAOKIfSUc7HWNLM61RTXDLGoE7A,6973
-diffusers/models/modeling_utils.py,sha256=Zw6sbE1Im5nc8M-mJ6V1sFaWoOmKKP8dCcPGlcnYprA,56751
+diffusers/models/modeling_utils.py,sha256=YzZdwXmI8vZZwPpUznf-ImfWw4agRzXBfbqYutjfpiQ,57221
 diffusers/models/normalization.py,sha256=QbJLSfMfduIakq8Cl12hVYr_sdKLxsBNmKDP1GA3jao,9880
 diffusers/models/resnet.py,sha256=ML9EdypGYniSay_EsyswuTlmGi7429WJhYqIW7VEBoQ,32241
 diffusers/models/resnet_flax.py,sha256=tqRZQCZIq7NlXex3eGldyhRpZjr_EXWl1l2eVflFV7c,4021
@@ -70,7 +71,7 @@ diffusers/models/transformers/pixart_transformer_2d.py,sha256=B4Z1HYKKjvo9cOv2n2
 diffusers/models/transformers/prior_transformer.py,sha256=-KSsTuYREFoopKCjPcERlu67_e1zgnQuSxZo4_sHxIY,17352
 diffusers/models/transformers/t5_film_transformer.py,sha256=rem0WHICvYntqtjGtlBqNFVn40BocnMmeH26rY8650s,16024
 diffusers/models/transformers/transformer_2d.py,sha256=ZXf2MBaegqbOynJoOYpDmcM1xNLtg35gsQSCcI212G8,28862
-diffusers/models/transformers/transformer_sd3.py,sha256=wGZBYs9iif5cdMptf8aRDL7GLViPdm7r5Gj5Cbg5Q2c,15339
+diffusers/models/transformers/transformer_sd3.py,sha256=sdvaVvYSs4Rqobzf47C9Eu6rnQ5epM8hvv1Ibfg4BKI,16104
 diffusers/models/transformers/transformer_temporal.py,sha256=qDxaL2Q7SBdkkFOqXf7iCV6WEK-FRoBXRble_lUzMLo,16934
 diffusers/models/unets/__init__.py,sha256=srYFA7zEcDY7LxyUB2jz3TdRgsLz8elrWCpT6Y4YXuU,695
 diffusers/models/unets/unet_1d.py,sha256=hcm9wvBsR_2WMO_va_O7HmHbFdUWGU7DX_KEFzqVKvQ,10787
@@ -88,8 +89,8 @@ diffusers/models/unets/unet_motion_model.py,sha256=UG1i4PwDzN-tlhwb2eNGoRgtNzRck
 diffusers/models/unets/unet_spatio_temporal_condition.py,sha256=9aaCaYOgsS5BvFLATnq5fB1j8Cc8Lvb-lHAb-c2pt4A,22099
 diffusers/models/unets/unet_stable_cascade.py,sha256=JDKRKPrlWiWaDy283R0inv6NpIwmpMLoxs_lR42xx48,28390
 diffusers/models/unets/uvit_2d.py,sha256=ScLWI09Wref-vU25gWYao4DojlfGxMRz7cmCQUKV01A,17338
-diffusers/pipelines/__init__.py,sha256=pDtY9LrIX9wZA2XGn1tmBaN1wEIQEl1xAfCuM6kbqlc,23341
-diffusers/pipelines/auto_pipeline.py,sha256=IhT3Uz4TIl0zukS859Q3uFMldC7nQ9ipbuY5zYXxgJs,50299
+diffusers/pipelines/__init__.py,sha256=1k-IR6EVLJK2_lDHQDTgeaxw2tn5f2uZzVeu_2OZJTo,23587
+diffusers/pipelines/auto_pipeline.py,sha256=RZNYg_0GSeZ5XO85R4x5AYwxZH9QLuNDrRyg_8vlGOw,50609
 diffusers/pipelines/free_init_utils.py,sha256=LJ4c3eaozTR6u5otrdbbR-rtscOfRluZptp1O1zam_s,7654
 diffusers/pipelines/onnx_utils.py,sha256=6TK_wddhFsKqPejOrAHL-cavB45j30Sd8bMOfJGprms,8329
 diffusers/pipelines/pipeline_flax_utils.py,sha256=u71buhRtMaa60F8rniPq4GAMxfbYnI1ce49Grb6aemw,27406
@@ -126,6 +127,8 @@ diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py,sha256=uLVbU
 diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py,sha256=XxVSCnS7sJ9FOfkyFqJTsV-nRcx7w1SOe6-oKUcVor8,82443
 diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py,sha256=up2sXg8b1n0ArsfkAkrlVCziWuz_48hhZYiuHb9JRIo,86586
 diffusers/pipelines/controlnet/pipeline_flax_controlnet.py,sha256=1spdtZVcZJFmMXoTRn11Ag7SzeVnRFMLN9pXtEQJehQ,22663
+diffusers/pipelines/controlnet_sd3/__init__.py,sha256=wpkZd83kZsCANFmHaagNs94XAZ8PtyrUK-Y2bpuc_oA,1647
+diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py,sha256=D72ybW8LGYqMmanll5QQi8lfpJLE0nwMARt7rsE0ffc,53002
 diffusers/pipelines/controlnet_xs/__init__.py,sha256=TuIgTKgY4MVB6zaoNTduQAEVRsNptBZQZhnxxQ3hpyg,2403
 diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py,sha256=T1DVZoxyhMlcTd9nO6orHcQ3INs60HDK4cQm7eFINBg,45524
 diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py,sha256=9MrRTGq0jK4Qnt6QYpnO2zilsioU5LFf0JqmDxSXleg,56895
@@ -273,8 +276,8 @@ diffusers/pipelines/stable_diffusion/safety_checker_flax.py,sha256=8VrTsmMmbKJE3
 diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py,sha256=PULQ_c3li4FD8Rn-3q5qCoHoE4Iknx3eZ2_XLy1DbA4,1890
 diffusers/pipelines/stable_diffusion_3/__init__.py,sha256=MTUjyZkuU6Vohgm-WkmHTiUBu_H6dy7yTJIzfqAXoGM,1734
 diffusers/pipelines/stable_diffusion_3/pipeline_output.py,sha256=empNHoFAmdz6__yOCX2kuJqZtVdtoGAvVmH5mW42-3s,610
-diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py,sha256=sWYQywVoT1t8tH0pzGSdlGRlnameYJInasLm3GJ-saQ,43632
-diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py,sha256=RBFgERk_93ZG74bYO7ANMQ3fYsJBvrLfCRWrcI7Dg70,46260
+diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py,sha256=JE6CCGFeJ4kIhwnTxy-G9mgz_mTNJcmDMBNXpMWOcfY,44419
+diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py,sha256=6ktjqdOtfoxIgx5N11y6LCf_P2qLvP3sTXlGd4LDJV8,47047
 diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py,sha256=VpZ5FPx9ACTOT4qiEqun2QYeUtx9Rp0YVDwqhYe28QM,1390
 diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py,sha256=BygpdgD_cNOv3m-WW8A_Qxc5kAwVpyfSDqpIh5LiQ0E,51136
 diffusers/pipelines/stable_diffusion_diffedit/__init__.py,sha256=JlcUNahRBm0uaPzappogqfjyLDsNW6IeyOfuLs4af5M,1358
@@ -386,13 +389,13 @@ diffusers/utils/dummy_flax_and_transformers_objects.py,sha256=XyiqnjacRb86sS9F_V
 diffusers/utils/dummy_flax_objects.py,sha256=EIyO7jYPH4yjuBIxysZWE0rka3qPLEl1TmMBt5SwXNA,5316
 diffusers/utils/dummy_note_seq_objects.py,sha256=DffX40mDzWTMCyYhKudgIeBhtqTSpiSkVzcAMRue8dY,506
 diffusers/utils/dummy_onnx_objects.py,sha256=4Z61m3P9NUwbebsK58wAKs6y32Id6UaiSRyeHXo3ecA,493
-diffusers/utils/dummy_pt_objects.py,sha256=IbHzhxNmMv9kCZsybBAeN2OoKulVARXXbbNvkBSUWuk,31925
+diffusers/utils/dummy_pt_objects.py,sha256=XZoBIQLzYsRh34dPZaE4AT2h5mo34Ze3fB91twHW6U0,32674
 diffusers/utils/dummy_torch_and_librosa_objects.py,sha256=JUfqU2n3tSKHyWbjSXrpdW_jr-YbMxAvAhLlPa2_Rxs,948
 diffusers/utils/dummy_torch_and_scipy_objects.py,sha256=zOLdmqbtma5nakkdYgoErISV28yaztmBLI3wrC2Z_bU,537
 diffusers/utils/dummy_torch_and_torchsde_objects.py,sha256=JPn6XJ3N3BuMq7XLeyMKRsdtmP9csGEZ0AfTp8A4nG0,550
 diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py,sha256=IMw6Qs9tTdRrMUXyM_Bc_BuJBvw0OVVHNZMOk3suF7g,1151
 diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py,sha256=SiKni7YZ-pmZrurHU3-lhbDGKOGCCVxSK3GJbrARqgU,3023
-diffusers/utils/dummy_torch_and_transformers_objects.py,sha256=eeK3QOV-HTlhcU92TAJQSCU3DDFEEdPvO0M5uWmH96A,51750
+diffusers/utils/dummy_torch_and_transformers_objects.py,sha256=FHcysg935_i0EiFVSPHLtm2Sa_HSyilybZKOWJokkx8,52202
 diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py,sha256=z-JrPgPo2dWv-buMytUqBd6QqEx8Uha6M1cKa6gR4Dc,621
 diffusers/utils/dynamic_modules_utils.py,sha256=Kl8Z2hyzP9u5R7a6FS-DTJJHL_JxXrAQR5BNOD6J7Lw,20162
 diffusers/utils/export_utils.py,sha256=hxgE1gEqy5wh0G7NlIyKTFeu_3NFW82Y74o2THknKrs,4425
@@ -408,9 +411,9 @@ diffusers/utils/state_dict_utils.py,sha256=NsWzyX4eqKCfjLjgChQnFSf7nSQz1XFHgINYB
 diffusers/utils/testing_utils.py,sha256=pBLNH9FxPMYtD7rnadEt1PiG21dCiWg2svifStGRArk,36818
 diffusers/utils/torch_utils.py,sha256=6e6cJmPMbkEqXXJLlcKuVz0zZF8fIc7dF-azq-_6-Xw,6234
 diffusers/utils/versions.py,sha256=-e7XW1TzZ-tsRo9PMQHp-hNGYHuVDFzLtwg3uAJzqdI,4333
-diffusers-0.29.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-diffusers-0.29.0.dist-info/METADATA,sha256=b4ZSa0yygQ2jdlzFGP4no1owY08HzsZynzZQ5XRWTWI,19119
-diffusers-0.29.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-diffusers-0.29.0.dist-info/entry_points.txt,sha256=_1bvshKV_6_b63_FAkcUs9W6tUKGeIoQ3SHEZsovEWs,72
-diffusers-0.29.0.dist-info/top_level.txt,sha256=axJl2884vMSvhzrFrSoht36QXA_6gZN9cKtg4xOO72o,10
-diffusers-0.29.0.dist-info/RECORD,,
+diffusers-0.29.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+diffusers-0.29.1.dist-info/METADATA,sha256=BqFm5bFYNZO3o1C3HxV_GG4gHf1dPAmdGcuco-8_4OU,19033
+diffusers-0.29.1.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+diffusers-0.29.1.dist-info/entry_points.txt,sha256=_1bvshKV_6_b63_FAkcUs9W6tUKGeIoQ3SHEZsovEWs,72
+diffusers-0.29.1.dist-info/top_level.txt,sha256=axJl2884vMSvhzrFrSoht36QXA_6gZN9cKtg4xOO72o,10
+diffusers-0.29.1.dist-info/RECORD,,

diffusers-0.29.0.dist-info/WHEEL → diffusers-0.29.1.dist-info/WHEEL

@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.38.4)
+Generator: setuptools (70.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 