InvokeAI 4.2.9.dev3__py3-none-any.whl → 4.2.9.dev5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/METADATA +3 -1
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/RECORD +45 -28
- invokeai/app/invocations/fields.py +11 -0
- invokeai/app/invocations/flux_text_encoder.py +86 -0
- invokeai/app/invocations/flux_text_to_image.py +172 -0
- invokeai/app/invocations/model.py +124 -2
- invokeai/app/invocations/primitives.py +12 -0
- invokeai/app/services/model_install/model_install_default.py +3 -2
- invokeai/app/services/model_records/model_records_base.py +1 -0
- invokeai/app/services/workflow_records/default_workflows/Flux Text to Image.json +266 -0
- invokeai/backend/flux/math.py +32 -0
- invokeai/backend/flux/model.py +117 -0
- invokeai/backend/flux/modules/autoencoder.py +310 -0
- invokeai/backend/flux/modules/conditioner.py +33 -0
- invokeai/backend/flux/modules/layers.py +253 -0
- invokeai/backend/flux/sampling.py +176 -0
- invokeai/backend/flux/util.py +71 -0
- invokeai/backend/model_manager/config.py +60 -3
- invokeai/backend/model_manager/load/model_loaders/flux.py +234 -0
- invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py +6 -1
- invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +12 -2
- invokeai/backend/model_manager/load/model_util.py +12 -1
- invokeai/backend/model_manager/probe.py +57 -12
- invokeai/backend/model_manager/starter_models.py +74 -1
- invokeai/backend/model_manager/util/select_hf_files.py +23 -3
- invokeai/backend/quantization/__init__.py +0 -0
- invokeai/backend/quantization/bnb_llm_int8.py +125 -0
- invokeai/backend/quantization/bnb_nf4.py +156 -0
- invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py +79 -0
- invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py +96 -0
- invokeai/backend/quantization/scripts/quantize_t5_xxl_bnb_llm_int8.py +92 -0
- invokeai/backend/stable_diffusion/diffusion/conditioning_data.py +11 -5
- invokeai/frontend/web/dist/assets/App-DY31F4eS.js +26 -0
- invokeai/frontend/web/dist/assets/ThemeLocaleProvider-CHTCW1Vd.js +1 -0
- invokeai/frontend/web/dist/assets/index-DL1l-JWO.js +563 -0
- invokeai/frontend/web/dist/index.html +1 -1
- invokeai/frontend/web/dist/locales/en.json +7 -2
- invokeai/invocation_api/__init__.py +4 -0
- invokeai/version/invokeai_version.py +1 -1
- invokeai/frontend/web/dist/assets/App-CEWYwnr8.js +0 -26
- invokeai/frontend/web/dist/assets/ThemeLocaleProvider-BnjZcfy9.js +0 -1
- invokeai/frontend/web/dist/assets/index-CMrB5_9v.js +0 -563
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/LICENSE +0 -0
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/LICENSE-SD1+SD2.txt +0 -0
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/LICENSE-SDXL.txt +0 -0
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/WHEEL +0 -0
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/entry_points.txt +0 -0
- {InvokeAI-4.2.9.dev3.dist-info → InvokeAI-4.2.9.dev5.dist-info}/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: InvokeAI
|
|
3
|
-
Version: 4.2.9.
|
|
3
|
+
Version: 4.2.9.dev5
|
|
4
4
|
Summary: An implementation of Stable Diffusion which provides various new features and options to aid the image generation process
|
|
5
5
|
Author-email: The InvokeAI Project <lincoln.stein@gmail.com>
|
|
6
6
|
License: Apache License
|
|
@@ -222,6 +222,7 @@ Requires-Dist: onnxruntime==1.16.3
|
|
|
222
222
|
Requires-Dist: opencv-python==4.9.0.80
|
|
223
223
|
Requires-Dist: pytorch-lightning==2.1.3
|
|
224
224
|
Requires-Dist: safetensors==0.4.3
|
|
225
|
+
Requires-Dist: sentencepiece==0.2.0
|
|
225
226
|
Requires-Dist: spandrel==0.3.4
|
|
226
227
|
Requires-Dist: timm==0.6.13
|
|
227
228
|
Requires-Dist: torch==2.2.2
|
|
@@ -262,6 +263,7 @@ Requires-Dist: rich~=13.3
|
|
|
262
263
|
Requires-Dist: scikit-image~=0.21.0
|
|
263
264
|
Requires-Dist: semver~=3.0.1
|
|
264
265
|
Requires-Dist: test-tube~=0.7.5
|
|
266
|
+
Requires-Dist: bitsandbytes==0.43.3; sys_platform != "darwin"
|
|
265
267
|
Requires-Dist: windows-curses; sys_platform == "win32"
|
|
266
268
|
Provides-Extra: dev
|
|
267
269
|
Requires-Dist: jurigged; extra == "dev"
|
|
@@ -28,7 +28,9 @@ invokeai/app/invocations/crop_latents.py,sha256=frCjcMQeN8G2wweMkZnOUPox79r8ap2r
|
|
|
28
28
|
invokeai/app/invocations/cv.py,sha256=xlIWdxA1x-79Fzz8BNT3uZe2gFn-_dyHNjSS8dRWy-c,1623
|
|
29
29
|
invokeai/app/invocations/denoise_latents.py,sha256=mEZ3C_0PHU2a5C2EYjXU5sba8HbnAcZBuLW4wldBG2o,47718
|
|
30
30
|
invokeai/app/invocations/facetools.py,sha256=wRdN9cxbM5-LhHvoJA0f4wcP9aAf-WPZAfeD6ktWECg,26436
|
|
31
|
-
invokeai/app/invocations/fields.py,sha256=
|
|
31
|
+
invokeai/app/invocations/fields.py,sha256=xdhHrsWGpChgb2VhBjJ2ttqemZ8Bt210sKn6XSyKsDg,25244
|
|
32
|
+
invokeai/app/invocations/flux_text_encoder.py,sha256=xl42hmNuwiG02EY8_OUTpMAo7EkFPAHdxsZrj_xY8PY,3478
|
|
33
|
+
invokeai/app/invocations/flux_text_to_image.py,sha256=XpRq-FcLWovPQRygx4auldOc-5dFlT26xAwdtgDn358,6920
|
|
32
34
|
invokeai/app/invocations/grounding_dino.py,sha256=Mv46FG3mq8X-XK50efdwdbV16B7GI5_kDCedjviXSn4,4560
|
|
33
35
|
invokeai/app/invocations/ideal_size.py,sha256=LyowqAWwv11p9diFQDY5GyUs3jLzj0dgTxBov_WlrLI,2810
|
|
34
36
|
invokeai/app/invocations/image.py,sha256=mURKycFb6V6TNaAkDNNh3fMNwFhHjCjIgVrI6uq4roU,38635
|
|
@@ -39,10 +41,10 @@ invokeai/app/invocations/latents_to_image.py,sha256=T42-aeT8798ITECx_EvK-JqS09WM
|
|
|
39
41
|
invokeai/app/invocations/mask.py,sha256=ojDupVM7tIT-uEtmKM7gG7peLNa5v17X75zr2wk1SBc,5434
|
|
40
42
|
invokeai/app/invocations/math.py,sha256=y00c6frIIowhN2lwTHYDjnuozNAtHGBa8_7FfHcRxLk,10441
|
|
41
43
|
invokeai/app/invocations/metadata.py,sha256=ZrdeeDBpbq03dLqCykhkRc0y8LHN3sA3UQXP86UqQCA,11467
|
|
42
|
-
invokeai/app/invocations/model.py,sha256=
|
|
44
|
+
invokeai/app/invocations/model.py,sha256=MK0685OfrBnYr6TVUTXdPSjGj14qdNNZXaLjCvRBxpc,24221
|
|
43
45
|
invokeai/app/invocations/noise.py,sha256=9cLYCgaMjVxuMxCa4_lPzHXvTo3M7ghBSDrG97bPJdg,3571
|
|
44
46
|
invokeai/app/invocations/param_easing.py,sha256=yEdJY3SMcqiX7jrEAqQGDSFw3S7qBOrviDpbZ0rQyPM,10931
|
|
45
|
-
invokeai/app/invocations/primitives.py,sha256=
|
|
47
|
+
invokeai/app/invocations/primitives.py,sha256=G9q69QsHEQebPmKKLz4P-M8f_MMhJ5qeRhc7utM6sXQ,16372
|
|
46
48
|
invokeai/app/invocations/prompt.py,sha256=NKjGhuM8oAF7P-ipyMF0lR3dl3mmgcqtIWlgndai4Go,3638
|
|
47
49
|
invokeai/app/invocations/resize_latents.py,sha256=QCwXjEiOHGsNqwwdYsA-02kfm4_IMvxZ8FCa2ExVz5M,3802
|
|
48
50
|
invokeai/app/invocations/scheduler.py,sha256=8l9qxGLRTsiMS2xT92DnbU1cHgbmLUFH1-o5-3HICZE,1101
|
|
@@ -118,7 +120,7 @@ invokeai/app/services/model_images/model_images_default.py,sha256=DguPR-8dqZWto2
|
|
|
118
120
|
invokeai/app/services/model_install/__init__.py,sha256=Ro5jmXL0eSD7iuyFaeN_wMn90NuAzU1Xl8FKq1EtgKs,703
|
|
119
121
|
invokeai/app/services/model_install/model_install_base.py,sha256=qSVPNBXfx2NJnOaQz2N1w624xDeEyhw_2wDQmuJPlGs,10015
|
|
120
122
|
invokeai/app/services/model_install/model_install_common.py,sha256=yEq7QDI5Gy8AdqswnHpztdMnpmYHla1K34ryD700wh8,8672
|
|
121
|
-
invokeai/app/services/model_install/model_install_default.py,sha256=
|
|
123
|
+
invokeai/app/services/model_install/model_install_default.py,sha256=clc2UIXkHxkWipXsuoj0LqgwWVJcjHEaUFBOy99Otno,41437
|
|
122
124
|
invokeai/app/services/model_load/__init__.py,sha256=qRbpqIvtfHQg08apIc-8YEbBknB6VCS2ESsdB_gq88w,277
|
|
123
125
|
invokeai/app/services/model_load/model_load_base.py,sha256=ieDxiWYl4fYlwSBCAV0iTbDDV5IrVYbQmH9zUyfUwXk,2035
|
|
124
126
|
invokeai/app/services/model_load/model_load_default.py,sha256=eVWYB5QG3F3UeIvbkZ-jw9zZ2VZ1GFydOMEkSB6BiP0,4729
|
|
@@ -127,7 +129,7 @@ invokeai/app/services/model_manager/model_manager_base.py,sha256=uUAXhQT_hlk8yKq
|
|
|
127
129
|
invokeai/app/services/model_manager/model_manager_common.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
128
130
|
invokeai/app/services/model_manager/model_manager_default.py,sha256=46UecQE8Eq_LN83mZTo6N8Zjuu3pDlrExWZrBuMNiTM,3842
|
|
129
131
|
invokeai/app/services/model_records/__init__.py,sha256=OzsXFpijEMROZWxLq2gaHgpDhUReybvB3AcVcHz0VHE,571
|
|
130
|
-
invokeai/app/services/model_records/model_records_base.py,sha256=
|
|
132
|
+
invokeai/app/services/model_records/model_records_base.py,sha256=KQ6LQvHHdPVDcwIQkHiznZRBVsfeZuRMqRW4Aly8B8g,8349
|
|
131
133
|
invokeai/app/services/model_records/model_records_sql.py,sha256=vVL-aZl2zv40taHyEqJYOY9wW7eThEcbYxuM5-KCml8,13317
|
|
132
134
|
invokeai/app/services/names/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
133
135
|
invokeai/app/services/names/names_base.py,sha256=W-nIGXj0zoQ4PBqJ2ZsO9pOmYKhZI0VqdOkdAEkQC8c,312
|
|
@@ -208,6 +210,7 @@ invokeai/app/services/workflow_records/workflow_records_common.py,sha256=YsqF3BW
|
|
|
208
210
|
invokeai/app/services/workflow_records/workflow_records_sqlite.py,sha256=jmfqNhRE2uHArobgszy7Sk4AIcRqySSYYRIRQ-E9J0Q,8618
|
|
209
211
|
invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json,sha256=K-blUzhyR6Wi2qxX_4C8yh4fK3bW1MWHKpQ8DbsNGC0,24038
|
|
210
212
|
invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json,sha256=Vmd5xOCZWKaAebIdiwSwlDd2hCtIHYXtmG0cXW7a1qk,40964
|
|
213
|
+
invokeai/app/services/workflow_records/default_workflows/Flux Text to Image.json,sha256=GqknzxQw1R70ElW-VA-vhIHtHA-RF-N-LhAs07YFsd0,8265
|
|
211
214
|
invokeai/app/services/workflow_records/default_workflows/Multi ControlNet (Canny & Depth).json,sha256=K4KOWTYYnOrOhwxWE_GIjsXKhsAY2YxL40uBBGw-TBw,28538
|
|
212
215
|
invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SD1.5.json,sha256=tz5p5Io3VXafpSCA2WORx4wYex00KqUsxqDJUKWZmXs,39959
|
|
213
216
|
invokeai/app/services/workflow_records/default_workflows/MultiDiffusion SDXL.json,sha256=prbM2KQBN8FEhJeS3Ka3sV66QC7Fba9R-Wl24kMNuH8,46050
|
|
@@ -236,6 +239,13 @@ invokeai/backend/model_patcher.py,sha256=A_Y-l-c9oSW5_na_jtgyTfxL6qjkNDYKNn-z32F
|
|
|
236
239
|
invokeai/backend/raw_model.py,sha256=g7RfS65MHixII0uB0bczftXa6XZ2AJrjGes6vBdtWDY,778
|
|
237
240
|
invokeai/backend/spandrel_image_to_image_model.py,sha256=8LEPnxmsq49VdDpJuM8k_sT22owSQU86wzYeyvI8mQc,5237
|
|
238
241
|
invokeai/backend/textual_inversion.py,sha256=ELJpRptFlxZyJSdCxN1-p6BQV_8l9-j6_4VNDSDdtY4,5434
|
|
242
|
+
invokeai/backend/flux/math.py,sha256=HUg2_9D8gTWbBCk83Kbj4YtVpbed1jQp-FfFo_zduAQ,1237
|
|
243
|
+
invokeai/backend/flux/model.py,sha256=7_lwJ-U_yvlqoaF9fSr_pp_oZ7-IY76FNNSbKS16ABo,3800
|
|
244
|
+
invokeai/backend/flux/sampling.py,sha256=plsQWr34OKZrljJ5cbyXeiFDnfFbg5quYvxjskcRteI,5501
|
|
245
|
+
invokeai/backend/flux/util.py,sha256=EhTyFi5D9ZwXBtCa3t5IlfxFFAlRfWzD72SnaJ_iUlY,1537
|
|
246
|
+
invokeai/backend/flux/modules/autoencoder.py,sha256=XzjNn5HimcvquoSV9bTAwDWlRoHrSLMd3IUmmBX5VRo,10501
|
|
247
|
+
invokeai/backend/flux/modules/conditioner.py,sha256=Qg8LtxBj5vc8F620MoR3-PkZKpn5w-u4JpKlbLlKdZI,1186
|
|
248
|
+
invokeai/backend/flux/modules/layers.py,sha256=bwtnTuYqqwgiSDnoxcEMUbApGdLZ0RvJsrGfgqWIz8Q,9442
|
|
239
249
|
invokeai/backend/image_util/__init__.py,sha256=RobCteLw_1EBJf6auQYQOlQT-uVmSqbNzImQIaSl4eI,395
|
|
240
250
|
invokeai/backend/image_util/canny.py,sha256=1NlaLs9V0WyBIn9mF7Gf-LetF6n4gazF8V1r6E9c5kg,1461
|
|
241
251
|
invokeai/backend/image_util/hed.py,sha256=iTGUXTS8jadKDq2zh1Y1-daAvXyaA8YbDuoFGWFcOo4,5445
|
|
@@ -274,17 +284,17 @@ invokeai/backend/ip_adapter/resampler.py,sha256=H8fOGW2ImExAM6LtgbuRudonkkW3Bgce
|
|
|
274
284
|
invokeai/backend/model_hash/hash_validator.py,sha256=h3n7m7PMgoeRAUKsZmyLLnzpMSBjihL4JoX1eCXY1Do,11111
|
|
275
285
|
invokeai/backend/model_hash/model_hash.py,sha256=Bh0GsTWDwCRI7lrWdqEsgqLsiAqU3ECFdqlm53hG_d0,8337
|
|
276
286
|
invokeai/backend/model_manager/__init__.py,sha256=Lp4UYH64YM7TrVeFAVswjWSJyuhJCjOzN-aUkTXbHF0,868
|
|
277
|
-
invokeai/backend/model_manager/config.py,sha256=
|
|
287
|
+
invokeai/backend/model_manager/config.py,sha256=wwa0cIhTwPH9waW-cdcQ71x0ISJwdnPUkiu98-kUolQ,18425
|
|
278
288
|
invokeai/backend/model_manager/merge.py,sha256=Dk6uGfiEDWeHrwmSpSxNKDlQLHGdmnjczJ4KwpVlUsI,7963
|
|
279
|
-
invokeai/backend/model_manager/probe.py,sha256=
|
|
289
|
+
invokeai/backend/model_manager/probe.py,sha256=oQJpNTTdQ60htRGOyxL0pofG6bmWzqJdLxnh0zbohok,39573
|
|
280
290
|
invokeai/backend/model_manager/search.py,sha256=OCRBOyvl2G1RH3IVJf4lyiAMGLUG0IhwpN0V99ATt6o,4893
|
|
281
|
-
invokeai/backend/model_manager/starter_models.py,sha256=
|
|
291
|
+
invokeai/backend/model_manager/starter_models.py,sha256=DaZZ8AAewoIq8Cd_Bi4lt9NRjgUt3z7HKboTu-EXBeE,20903
|
|
282
292
|
invokeai/backend/model_manager/load/__init__.py,sha256=9x2aYphy8NcdyElCqJFTy5WxKqVEtrYmsw0qRDEsxL4,1023
|
|
283
293
|
invokeai/backend/model_manager/load/load_base.py,sha256=YDK1_n1VLjL6q-PgG4OwirKmA-3eD_e9wUvxSYDn6Yw,4784
|
|
284
294
|
invokeai/backend/model_manager/load/load_default.py,sha256=wfPaFwVHaZ4rHy72O-jaHz0ZK5iEk-PiswEuvBFjifQ,4002
|
|
285
295
|
invokeai/backend/model_manager/load/memory_snapshot.py,sha256=YChwCTvA804__FJ7BUC2gIvQ9dcEpAlRN3H3N7uNqb0,4079
|
|
286
296
|
invokeai/backend/model_manager/load/model_loader_registry.py,sha256=GS1Q6w1bmUCOmE8aTCQqua6Jq0CEoTfDHZrXWU4U8IE,4014
|
|
287
|
-
invokeai/backend/model_manager/load/model_util.py,sha256=
|
|
297
|
+
invokeai/backend/model_manager/load/model_util.py,sha256=W3JdPA1t-ZVzQYZcHMCtiXW-Yx-hEmrPKxSqPS-OxBY,6129
|
|
288
298
|
invokeai/backend/model_manager/load/optimizations.py,sha256=cwGXg5tBv-w5UGKaO5u2o8x6kuKyWC4cPn-EG5CwllQ,1392
|
|
289
299
|
invokeai/backend/model_manager/load/model_cache/__init__.py,sha256=8vmdxdzgTNQQIrEitdbZL3oA5_zGTdIwcfu4_7FdWCM,217
|
|
290
300
|
invokeai/backend/model_manager/load/model_cache/model_cache_base.py,sha256=t-m7r3jhx7UsKdDG8_qBmNgz-cav-a2UclE9bi8eS7U,6216
|
|
@@ -292,12 +302,13 @@ invokeai/backend/model_manager/load/model_cache/model_cache_default.py,sha256=mJ
|
|
|
292
302
|
invokeai/backend/model_manager/load/model_cache/model_locker.py,sha256=Nhc80hZ43OGdvYJdufYEMWP9D0k8eTBjYhApEb0Rgc4,2190
|
|
293
303
|
invokeai/backend/model_manager/load/model_loaders/__init__.py,sha256=EDgdUKmYMiisOPDHtaRU2uwbfSlLbU6hoQybJqVgS7k,37
|
|
294
304
|
invokeai/backend/model_manager/load/model_loaders/controlnet.py,sha256=PBvWgeGpgLmE1CovUrN8gDEpssCHmb5orfwAiOYCj3U,1347
|
|
295
|
-
invokeai/backend/model_manager/load/model_loaders/
|
|
305
|
+
invokeai/backend/model_manager/load/model_loaders/flux.py,sha256=zDr5QNa7wUa_15-SfdiFKGkDlszP7GMijEVV24JX2ow,9345
|
|
306
|
+
invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py,sha256=9cpb8uo810Rhs831hiZsNd_rq1smm064e_tPdmfah90,4881
|
|
296
307
|
invokeai/backend/model_manager/load/model_loaders/ip_adapter.py,sha256=cveZ8G9uoZLvPgL5gi1TOWUQCOZ45lkYGV3X50tqx9c,1327
|
|
297
308
|
invokeai/backend/model_manager/load/model_loaders/lora.py,sha256=N6JeCddj5iXoTEwL5Gdawn2BSAogxFQTCYqZnMkUayE,2580
|
|
298
309
|
invokeai/backend/model_manager/load/model_loaders/onnx.py,sha256=B89aT1_r2MZD5GH7GnPGA-VeA1f9t4RGCmahjHrVdHM,1615
|
|
299
310
|
invokeai/backend/model_manager/load/model_loaders/spandrel_image_to_image.py,sha256=gMKDXaEA74H2A_LnalZ-FsllczdvOiBimDozoiQCTpA,1548
|
|
300
|
-
invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py,sha256=
|
|
311
|
+
invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py,sha256=DOqRuC5bZI2rZcPu1ZcCbzyaBRgcq3oVHvEZUszvaeY,6467
|
|
301
312
|
invokeai/backend/model_manager/load/model_loaders/textual_inversion.py,sha256=u8aaHr91SZp89-51dtQmkRJbtGVBvqHLdSL9lX2Etaw,1722
|
|
302
313
|
invokeai/backend/model_manager/load/model_loaders/vae.py,sha256=eIX4iqbqBA_Rv5mE2otxyLqhzaGM8Iw0WODXzZ4XmKM,1290
|
|
303
314
|
invokeai/backend/model_manager/metadata/__init__.py,sha256=TRImeMpoWu7ZVEH35eLP_w6euT0frVJhupk1D5wZ7bc,1049
|
|
@@ -307,8 +318,14 @@ invokeai/backend/model_manager/metadata/fetch/fetch_base.py,sha256=Z1pwtCfK_aJY0
|
|
|
307
318
|
invokeai/backend/model_manager/metadata/fetch/huggingface.py,sha256=6WjRYd_8G_gnuaCZ3kUVFT9F-JzxCifpQMMFV3b-Yyw,4862
|
|
308
319
|
invokeai/backend/model_manager/util/libc_util.py,sha256=L6K94lsb2LC8zCf-SX-qHPk1IX7Jkj88-Az7OEinWvQ,3176
|
|
309
320
|
invokeai/backend/model_manager/util/model_util.py,sha256=NmLH47PXXLqPP0QCoMkAHlxwyav-01k5A2GhyH0ydgs,5234
|
|
310
|
-
invokeai/backend/model_manager/util/select_hf_files.py,sha256=
|
|
321
|
+
invokeai/backend/model_manager/util/select_hf_files.py,sha256=O4Y-9PuXtiqe2UuRlKnh2Rl-1mxtkZO5DilkLBVKnJE,7731
|
|
311
322
|
invokeai/backend/onnx/onnx_runtime.py,sha256=2x1VY0TD_rOr0EyHL9XnYnL2OrDptLKu-v6ba5xKVHc,9341
|
|
323
|
+
invokeai/backend/quantization/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
324
|
+
invokeai/backend/quantization/bnb_llm_int8.py,sha256=az_FxITDORm2w-J9Vu25ytK8PsAany5r_Cvd0prDtoo,5577
|
|
325
|
+
invokeai/backend/quantization/bnb_nf4.py,sha256=r_Y97abXJh18cUcVkieKu-3WTfvARQ4YGvC9PxqQGoo,7486
|
|
326
|
+
invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py,sha256=3VX1dqs_mmJLwws038xIjXEeyWApdK5W4A_UulQefoU,3532
|
|
327
|
+
invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py,sha256=Q6hl1jcJgYcf8XUv0jyf5eLNGhNY4WSB0_jhN_M80-I,3829
|
|
328
|
+
invokeai/backend/quantization/scripts/quantize_t5_xxl_bnb_llm_int8.py,sha256=OEhZf2e8a4lxqJeaC2QKEXrONy8k1x1kd36oOZn5cJE,4300
|
|
312
329
|
invokeai/backend/stable_diffusion/__init__.py,sha256=g_ewd8QGYcCAUvzvprxEPjwPGC0xXjQQtfzVeDKzIe4,448
|
|
313
330
|
invokeai/backend/stable_diffusion/denoise_context.py,sha256=9iSSdtL_PiwEEjtauuIZOvOWtSO8XVwMs9CD-9gCBac,5425
|
|
314
331
|
invokeai/backend/stable_diffusion/diffusers_pipeline.py,sha256=C_a2nSUs3VfNvheGid1Y5zYZLETM4hb2xhkTXX2vpxI,28460
|
|
@@ -318,7 +335,7 @@ invokeai/backend/stable_diffusion/extensions_manager.py,sha256=8nuxafeEdvnzt6T7T
|
|
|
318
335
|
invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py,sha256=R3ckLReKDOR9gYfHyvKzmsLxs7CuZSGLcz-DufxAy2I,9585
|
|
319
336
|
invokeai/backend/stable_diffusion/vae_tiling.py,sha256=scaK8nIFNQUKs3ERQREilqOPelDOblmeiIeN6nHKBRY,1353
|
|
320
337
|
invokeai/backend/stable_diffusion/diffusion/__init__.py,sha256=jM2_45i6TPyyZnwvJHv24tSwjfOoy2QjsSgsYnP1QAw,190
|
|
321
|
-
invokeai/backend/stable_diffusion/diffusion/conditioning_data.py,sha256=
|
|
338
|
+
invokeai/backend/stable_diffusion/diffusion/conditioning_data.py,sha256=XkLPgWQpdNPmBTjvMaxngGf-otCfVI4hAdwCFpPPBrQ,9928
|
|
322
339
|
invokeai/backend/stable_diffusion/diffusion/custom_atttention.py,sha256=JPgj5jpwR3whUi1pRs_KiDRneOHJXYOZqEq039oH-Hs,9702
|
|
323
340
|
invokeai/backend/stable_diffusion/diffusion/regional_ip_data.py,sha256=827a7P_usxN026r1SW8_DA94dFnCzCthj8P4dK0BuPY,3299
|
|
324
341
|
invokeai/backend/stable_diffusion/diffusion/regional_prompt_data.py,sha256=gjOsipACHNyuzmWBQ9eebFg08h350MX9g4N-bWeNROc,5463
|
|
@@ -374,12 +391,12 @@ invokeai/frontend/cli/arg_parser.py,sha256=6_y2ZB0iDmMvy2b_MJRWSdlqW1hToptmOoFl0
|
|
|
374
391
|
invokeai/frontend/install/__init__.py,sha256=aAlRfyPocCx3Bm7NRB2I3Y_88Ug5omU4M5thFiNaw1A,57
|
|
375
392
|
invokeai/frontend/install/import_images.py,sha256=wddGRowy3HQGEnRHTGAkOA1fhzPz9qT4IbGoZqX9xlc,34417
|
|
376
393
|
invokeai/frontend/web/__init__.py,sha256=H4ye3hqaqVMEuzTnUNGeYBSHDhYLDcUWzS1XOr1zUWw,54
|
|
377
|
-
invokeai/frontend/web/dist/index.html,sha256=
|
|
378
|
-
invokeai/frontend/web/dist/assets/App-CEWYwnr8.js,sha256=v0_idMkutcePfD1CsIYfCyXWnFWLsrqn7lBITqMkWZ8,724866
|
|
394
|
+
invokeai/frontend/web/dist/index.html,sha256=KIjX7JDBV3ye8BUtMTEowugflMpSdJgV0BRUWNM9m2I,682
|
|
379
395
|
invokeai/frontend/web/dist/assets/App-DEu4J2pT.css,sha256=HLO_vKmG5ANNQg2BEeOD8KYUvV8sL3rH0i2S1Cwe4T0,7322
|
|
396
|
+
invokeai/frontend/web/dist/assets/App-DY31F4eS.js,sha256=GJBme6ZtF6Atu3bCKJi_xzdORbJy3OrRY8CmEOUgfR8,719437
|
|
380
397
|
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-B2NxUfIp.css,sha256=AX2miGfzdvzaNF5JUMZdLSevO2nzgbeG8CvPMIBzQMs,15298
|
|
381
|
-
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-
|
|
382
|
-
invokeai/frontend/web/dist/assets/index-
|
|
398
|
+
invokeai/frontend/web/dist/assets/ThemeLocaleProvider-CHTCW1Vd.js,sha256=eivjPxcujNBPO5GQvX6YuAg424YNhvgjtoQ_632WNjA,605
|
|
399
|
+
invokeai/frontend/web/dist/assets/index-DL1l-JWO.js,sha256=yzIcmZRy5mnVvvwm6BittvhAeAKfwZ005dUivabQ8xQ,2143805
|
|
383
400
|
invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-B2xhLi22.woff2,sha256=wpY9x2Wg6ArPJplunuyuJEXzN4PVPGFdwBSovnKqwXA,25888
|
|
384
401
|
invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-CMZtQduZ.woff2,sha256=YswB2u9yw-p2olhEU2jS9KuNBakfkcU_0S98QuMyWUI,18740
|
|
385
402
|
invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-CGAr0uHJ.woff2,sha256=akF8_kBanIieL9Y6ZyibhaaIaBwPpk8SFedKwUcaNv4,11200
|
|
@@ -414,7 +431,7 @@ invokeai/frontend/web/dist/locales/ar.json,sha256=tfE9ecaXVwPjLe6RK1WxypBPJnjiw2
|
|
|
414
431
|
invokeai/frontend/web/dist/locales/az.json,sha256=gssN5Q1lHAHJQj4-qvjg6dP3Sh-ZPNb6pb6FLE7xTxk,62
|
|
415
432
|
invokeai/frontend/web/dist/locales/bg.json,sha256=BlR-CNtSfdYBxKpPNry8czGAHryF1kqC64CEft-nk7w,2797
|
|
416
433
|
invokeai/frontend/web/dist/locales/de.json,sha256=I0a4pKjvNYVFBB92rg9_vxhjpLZiBvAffpCNlsNtBdo,53387
|
|
417
|
-
invokeai/frontend/web/dist/locales/en.json,sha256=
|
|
434
|
+
invokeai/frontend/web/dist/locales/en.json,sha256=1BAr9rFlHt0QOdT0GcWJuaFqHpSg-6ktvCQnlZN3ZaU,85543
|
|
418
435
|
invokeai/frontend/web/dist/locales/es.json,sha256=cM0Y11TD3yagTWW-UEWNsl3UJSczQ9sjXtYKYzdAh1Y,28050
|
|
419
436
|
invokeai/frontend/web/dist/locales/fi.json,sha256=m3l-thXyvRoGIeRbo99AziHPhx5nkcpQEiELOHGeqrc,1725
|
|
420
437
|
invokeai/frontend/web/dist/locales/fr.json,sha256=8iUSmE3g44tFPiOw6sjLbK2ZMHksvIaHzU-FXwCorbs,19666
|
|
@@ -438,14 +455,14 @@ invokeai/frontend/web/dist/locales/zh_CN.json,sha256=_MQCeltlC9IR8AVUmkYqvBVlZuQ
|
|
|
438
455
|
invokeai/frontend/web/dist/locales/zh_Hant.json,sha256=Eij7-HNGo6fy2JXigLXPaQSopWnpE3c5F3l4Y5v3TAE,6705
|
|
439
456
|
invokeai/frontend/web/scripts/clean_translations.py,sha256=-3KLLgb8fvvp0yglxKIkj1Ekr6AQh7OLWIArodu7gy4,3015
|
|
440
457
|
invokeai/frontend/web/static/docs/invoke-favicon-docs.svg,sha256=2DfWaNhjgplnQK8V79yXjQ34JDCk94ZPH29CiTk90y8,779
|
|
441
|
-
invokeai/invocation_api/__init__.py,sha256=
|
|
458
|
+
invokeai/invocation_api/__init__.py,sha256=PJgu7yu-0hukEyAHOm0sfw4kPZ2OvbAnXI6n7NtIt4A,5339
|
|
442
459
|
invokeai/version/__init__.py,sha256=22G4uBmmfLuldDclGKINkVS-rgPfNFQUJH_ag8m_OcM,529
|
|
443
|
-
invokeai/version/invokeai_version.py,sha256=
|
|
444
|
-
InvokeAI-4.2.9.
|
|
445
|
-
InvokeAI-4.2.9.
|
|
446
|
-
InvokeAI-4.2.9.
|
|
447
|
-
InvokeAI-4.2.9.
|
|
448
|
-
InvokeAI-4.2.9.
|
|
449
|
-
InvokeAI-4.2.9.
|
|
450
|
-
InvokeAI-4.2.9.
|
|
451
|
-
InvokeAI-4.2.9.
|
|
460
|
+
invokeai/version/invokeai_version.py,sha256=Ka_f4-2HOmNKxxozrK096OChralMNFdgh5fdnbLj2XM,27
|
|
461
|
+
InvokeAI-4.2.9.dev5.dist-info/LICENSE,sha256=OTAVifLLp_QCl8faBcfl0WT3BMqpeWZbEXw1MgPJzwM,10146
|
|
462
|
+
InvokeAI-4.2.9.dev5.dist-info/LICENSE-SD1+SD2.txt,sha256=GbsztR5dRQiKsmF33NYkGL-GFJhSc2oG9gQPwzo-7Tg,14555
|
|
463
|
+
InvokeAI-4.2.9.dev5.dist-info/LICENSE-SDXL.txt,sha256=JV_wPIqRG79iLCB8HrwY1J8jBimjUblcUxfnDbUMaqs,14213
|
|
464
|
+
InvokeAI-4.2.9.dev5.dist-info/METADATA,sha256=qLXHW8qeDihlx9n4lOngL_u6DjdSOScoFOCBxUkz-1o,27028
|
|
465
|
+
InvokeAI-4.2.9.dev5.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
|
|
466
|
+
InvokeAI-4.2.9.dev5.dist-info/entry_points.txt,sha256=LCFZSg_rueHvY38L2CPqClMIlBIkCf3LCd564kgtzkw,200
|
|
467
|
+
InvokeAI-4.2.9.dev5.dist-info/top_level.txt,sha256=69I61eoKb6LXiuo6Zz2l_6lhY1eRGBCqP8i9SMqp9Gg,9
|
|
468
|
+
InvokeAI-4.2.9.dev5.dist-info/RECORD,,
|
|
@@ -40,6 +40,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
|
|
|
40
40
|
|
|
41
41
|
# region Model Field Types
|
|
42
42
|
MainModel = "MainModelField"
|
|
43
|
+
FluxMainModel = "FluxMainModelField"
|
|
43
44
|
SDXLMainModel = "SDXLMainModelField"
|
|
44
45
|
SDXLRefinerModel = "SDXLRefinerModelField"
|
|
45
46
|
ONNXModel = "ONNXModelField"
|
|
@@ -48,6 +49,7 @@ class UIType(str, Enum, metaclass=MetaEnum):
|
|
|
48
49
|
ControlNetModel = "ControlNetModelField"
|
|
49
50
|
IPAdapterModel = "IPAdapterModelField"
|
|
50
51
|
T2IAdapterModel = "T2IAdapterModelField"
|
|
52
|
+
T5EncoderModel = "T5EncoderModelField"
|
|
51
53
|
SpandrelImageToImageModel = "SpandrelImageToImageModelField"
|
|
52
54
|
# endregion
|
|
53
55
|
|
|
@@ -125,13 +127,16 @@ class FieldDescriptions:
|
|
|
125
127
|
negative_cond = "Negative conditioning tensor"
|
|
126
128
|
noise = "Noise tensor"
|
|
127
129
|
clip = "CLIP (tokenizer, text encoder, LoRAs) and skipped layer count"
|
|
130
|
+
t5_encoder = "T5 tokenizer and text encoder"
|
|
128
131
|
unet = "UNet (scheduler, LoRAs)"
|
|
132
|
+
transformer = "Transformer"
|
|
129
133
|
vae = "VAE"
|
|
130
134
|
cond = "Conditioning tensor"
|
|
131
135
|
controlnet_model = "ControlNet model to load"
|
|
132
136
|
vae_model = "VAE model to load"
|
|
133
137
|
lora_model = "LoRA model to load"
|
|
134
138
|
main_model = "Main model (UNet, VAE, CLIP) to load"
|
|
139
|
+
flux_model = "Flux model (Transformer) to load"
|
|
135
140
|
sdxl_main_model = "SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load"
|
|
136
141
|
sdxl_refiner_model = "SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load"
|
|
137
142
|
onnx_main_model = "ONNX Main model (UNet, VAE, CLIP) to load"
|
|
@@ -231,6 +236,12 @@ class ColorField(BaseModel):
|
|
|
231
236
|
return (self.r, self.g, self.b, self.a)
|
|
232
237
|
|
|
233
238
|
|
|
239
|
+
class FluxConditioningField(BaseModel):
|
|
240
|
+
"""A conditioning tensor primitive value"""
|
|
241
|
+
|
|
242
|
+
conditioning_name: str = Field(description="The name of conditioning tensor")
|
|
243
|
+
|
|
244
|
+
|
|
234
245
|
class ConditioningField(BaseModel):
|
|
235
246
|
"""A conditioning tensor primitive value"""
|
|
236
247
|
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from typing import Literal
|
|
2
|
+
|
|
3
|
+
import torch
|
|
4
|
+
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
|
|
5
|
+
|
|
6
|
+
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
|
7
|
+
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
|
|
8
|
+
from invokeai.app.invocations.model import CLIPField, T5EncoderField
|
|
9
|
+
from invokeai.app.invocations.primitives import FluxConditioningOutput
|
|
10
|
+
from invokeai.app.services.shared.invocation_context import InvocationContext
|
|
11
|
+
from invokeai.backend.flux.modules.conditioner import HFEncoder
|
|
12
|
+
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@invocation(
|
|
16
|
+
"flux_text_encoder",
|
|
17
|
+
title="FLUX Text Encoding",
|
|
18
|
+
tags=["prompt", "conditioning", "flux"],
|
|
19
|
+
category="conditioning",
|
|
20
|
+
version="1.0.0",
|
|
21
|
+
classification=Classification.Prototype,
|
|
22
|
+
)
|
|
23
|
+
class FluxTextEncoderInvocation(BaseInvocation):
|
|
24
|
+
"""Encodes and preps a prompt for a flux image."""
|
|
25
|
+
|
|
26
|
+
clip: CLIPField = InputField(
|
|
27
|
+
title="CLIP",
|
|
28
|
+
description=FieldDescriptions.clip,
|
|
29
|
+
input=Input.Connection,
|
|
30
|
+
)
|
|
31
|
+
t5_encoder: T5EncoderField = InputField(
|
|
32
|
+
title="T5Encoder",
|
|
33
|
+
description=FieldDescriptions.t5_encoder,
|
|
34
|
+
input=Input.Connection,
|
|
35
|
+
)
|
|
36
|
+
t5_max_seq_len: Literal[256, 512] = InputField(
|
|
37
|
+
description="Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models."
|
|
38
|
+
)
|
|
39
|
+
prompt: str = InputField(description="Text prompt to encode.")
|
|
40
|
+
|
|
41
|
+
@torch.no_grad()
|
|
42
|
+
def invoke(self, context: InvocationContext) -> FluxConditioningOutput:
|
|
43
|
+
t5_embeddings, clip_embeddings = self._encode_prompt(context)
|
|
44
|
+
conditioning_data = ConditioningFieldData(
|
|
45
|
+
conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
conditioning_name = context.conditioning.save(conditioning_data)
|
|
49
|
+
return FluxConditioningOutput.build(conditioning_name)
|
|
50
|
+
|
|
51
|
+
def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
|
|
52
|
+
# Load CLIP.
|
|
53
|
+
clip_tokenizer_info = context.models.load(self.clip.tokenizer)
|
|
54
|
+
clip_text_encoder_info = context.models.load(self.clip.text_encoder)
|
|
55
|
+
|
|
56
|
+
# Load T5.
|
|
57
|
+
t5_tokenizer_info = context.models.load(self.t5_encoder.tokenizer)
|
|
58
|
+
t5_text_encoder_info = context.models.load(self.t5_encoder.text_encoder)
|
|
59
|
+
|
|
60
|
+
prompt = [self.prompt]
|
|
61
|
+
|
|
62
|
+
with (
|
|
63
|
+
t5_text_encoder_info as t5_text_encoder,
|
|
64
|
+
t5_tokenizer_info as t5_tokenizer,
|
|
65
|
+
):
|
|
66
|
+
assert isinstance(t5_text_encoder, T5EncoderModel)
|
|
67
|
+
assert isinstance(t5_tokenizer, T5Tokenizer)
|
|
68
|
+
|
|
69
|
+
t5_encoder = HFEncoder(t5_text_encoder, t5_tokenizer, False, self.t5_max_seq_len)
|
|
70
|
+
|
|
71
|
+
prompt_embeds = t5_encoder(prompt)
|
|
72
|
+
|
|
73
|
+
with (
|
|
74
|
+
clip_text_encoder_info as clip_text_encoder,
|
|
75
|
+
clip_tokenizer_info as clip_tokenizer,
|
|
76
|
+
):
|
|
77
|
+
assert isinstance(clip_text_encoder, CLIPTextModel)
|
|
78
|
+
assert isinstance(clip_tokenizer, CLIPTokenizer)
|
|
79
|
+
|
|
80
|
+
clip_encoder = HFEncoder(clip_text_encoder, clip_tokenizer, True, 77)
|
|
81
|
+
|
|
82
|
+
pooled_prompt_embeds = clip_encoder(prompt)
|
|
83
|
+
|
|
84
|
+
assert isinstance(prompt_embeds, torch.Tensor)
|
|
85
|
+
assert isinstance(pooled_prompt_embeds, torch.Tensor)
|
|
86
|
+
return prompt_embeds, pooled_prompt_embeds
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from einops import rearrange
|
|
3
|
+
from PIL import Image
|
|
4
|
+
|
|
5
|
+
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
|
|
6
|
+
from invokeai.app.invocations.fields import (
|
|
7
|
+
FieldDescriptions,
|
|
8
|
+
FluxConditioningField,
|
|
9
|
+
Input,
|
|
10
|
+
InputField,
|
|
11
|
+
WithBoard,
|
|
12
|
+
WithMetadata,
|
|
13
|
+
)
|
|
14
|
+
from invokeai.app.invocations.model import TransformerField, VAEField
|
|
15
|
+
from invokeai.app.invocations.primitives import ImageOutput
|
|
16
|
+
from invokeai.app.services.session_processor.session_processor_common import CanceledException
|
|
17
|
+
from invokeai.app.services.shared.invocation_context import InvocationContext
|
|
18
|
+
from invokeai.backend.flux.model import Flux
|
|
19
|
+
from invokeai.backend.flux.modules.autoencoder import AutoEncoder
|
|
20
|
+
from invokeai.backend.flux.sampling import denoise, get_noise, get_schedule, prepare_latent_img_patches, unpack
|
|
21
|
+
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import FLUXConditioningInfo
|
|
22
|
+
from invokeai.backend.util.devices import TorchDevice
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@invocation(
    "flux_text_to_image",
    title="FLUX Text to Image",
    tags=["image", "flux"],
    category="image",
    version="1.0.0",
    classification=Classification.Prototype,
)
class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Text-to-image generation using a FLUX model."""

    transformer: TransformerField = InputField(
        description=FieldDescriptions.flux_model,
        input=Input.Connection,
        title="Transformer",
    )
    vae: VAEField = InputField(
        description=FieldDescriptions.vae,
        input=Input.Connection,
    )
    positive_text_conditioning: FluxConditioningField = InputField(
        description=FieldDescriptions.positive_cond, input=Input.Connection
    )
    width: int = InputField(default=1024, multiple_of=16, description="Width of the generated image.")
    height: int = InputField(default=1024, multiple_of=16, description="Height of the generated image.")
    num_steps: int = InputField(
        default=4, description="Number of diffusion steps. Recommend values are schnell: 4, dev: 50."
    )
    guidance: float = InputField(
        default=4.0,
        description="The guidance strength. Higher values adhere more strictly to the prompt, and will produce less diverse images. FLUX dev only, ignored for schnell.",
    )
    seed: int = InputField(default=0, description="Randomness seed for reproducibility.")

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        """Run the full FLUX text-to-image pipeline: load conditioning, denoise, decode, save.

        Returns:
            ImageOutput referencing the saved image DTO.
        """
        # Load the conditioning data. Exactly one conditioning entry is expected
        # (FLUX conditioning carries both CLIP and T5 embeddings in one info object).
        cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
        assert len(cond_data.conditionings) == 1
        flux_conditioning = cond_data.conditionings[0]
        assert isinstance(flux_conditioning, FLUXConditioningInfo)

        latents = self._run_diffusion(context, flux_conditioning.clip_embeds, flux_conditioning.t5_embeds)
        image = self._run_vae_decoding(context, latents)
        image_dto = context.images.save(image=image)
        return ImageOutput.build(image_dto)

    def _run_diffusion(
        self,
        context: InvocationContext,
        clip_embeddings: torch.Tensor,
        t5_embeddings: torch.Tensor,
    ) -> torch.Tensor:
        """Run the FLUX transformer denoising loop and return unpacked latents.

        Args:
            context: The invocation context (model loading, cancellation checks).
            clip_embeddings: Pooled CLIP embeddings (the `vec` conditioning input).
            t5_embeddings: T5 sequence embeddings; assumed shape (batch, seq, dim) — TODO confirm.

        Returns:
            Latents unpacked to image-latent layout for VAE decoding.
        """
        transformer_info = context.models.load(self.transformer.transformer)
        inference_dtype = torch.bfloat16

        # Prepare input noise.
        x = get_noise(
            num_samples=1,
            height=self.height,
            width=self.width,
            device=TorchDevice.choose_torch_device(),
            dtype=inference_dtype,
            seed=self.seed,
        )

        img, img_ids = prepare_latent_img_patches(x)

        # schnell and dev use different schedules: only dev applies the timestep shift.
        is_schnell = "schnell" in transformer_info.config.config_path

        timesteps = get_schedule(
            num_steps=self.num_steps,
            image_seq_len=img.shape[1],
            shift=not is_schnell,
        )

        bs, t5_seq_len, _ = t5_embeddings.shape
        # Text token position ids are all-zero for FLUX text conditioning.
        txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
        # if the cache is not empty. NOTE(review): reaches into private `_services` — replace with a public API when
        # one exists.
        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)

        with transformer_info as transformer:
            assert isinstance(transformer, Flux)

            def step_callback() -> None:
                # Per-step cancellation check; raising aborts the denoise loop.
                if context.util.is_canceled():
                    raise CanceledException
                # TODO(review): re-add intermediate progress-image emission here (unpack latents,
                # convert to a preview PIL image, and emit via the events service) once the
                # invocation context exposes the denoise-progress API.

            x = denoise(
                model=transformer,
                img=img,
                img_ids=img_ids,
                txt=t5_embeddings,
                txt_ids=txt_ids,
                vec=clip_embeddings,
                timesteps=timesteps,
                step_callback=step_callback,
                guidance=self.guidance,
            )

        # Convert the packed patch sequence back to spatial latent layout (float32 for the VAE).
        x = unpack(x.float(), self.height, self.width)

        return x

    def _run_vae_decoding(
        self,
        context: InvocationContext,
        latents: torch.Tensor,
    ) -> Image.Image:
        """Decode FLUX latents to a PIL image using the loaded AutoEncoder.

        Args:
            context: The invocation context used to load the VAE.
            latents: Unpacked latents from `_run_diffusion`.

        Returns:
            The decoded RGB image.
        """
        vae_info = context.models.load(self.vae.vae)
        with vae_info as vae:
            assert isinstance(vae, AutoEncoder)
            latents = latents.to(dtype=TorchDevice.choose_torch_dtype())
            img = vae.decode(latents)

        # Map decoder output from [-1, 1] to [0, 255] uint8, channels-last, for PIL.
        img = img.clamp(-1, 1)
        img = rearrange(img[0], "c h w -> h w c")
        img_pil = Image.fromarray((127.5 * (img + 1.0)).byte().cpu().numpy())

        return img_pil
|