InvokeAI 4.2.4.tar.gz → 4.2.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (410)
  1. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/PKG-INFO +1 -1
  2. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/SOURCES.txt +17 -5
  3. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/PKG-INFO +1 -1
  4. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/dependencies.py +1 -1
  5. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/model_manager.py +128 -1
  6. InvokeAI-4.2.5/invokeai/app/invocations/blend_latents.py +98 -0
  7. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/compel.py +13 -4
  8. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/constants.py +3 -0
  9. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/controlnet_image_processors.py +34 -21
  10. InvokeAI-4.2.5/invokeai/app/invocations/create_denoise_mask.py +80 -0
  11. InvokeAI-4.2.5/invokeai/app/invocations/create_gradient_mask.py +138 -0
  12. InvokeAI-4.2.5/invokeai/app/invocations/crop_latents.py +61 -0
  13. InvokeAI-4.2.5/invokeai/app/invocations/denoise_latents.py +848 -0
  14. InvokeAI-4.2.5/invokeai/app/invocations/ideal_size.py +65 -0
  15. InvokeAI-4.2.5/invokeai/app/invocations/image_to_latents.py +125 -0
  16. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/infill.py +10 -5
  17. InvokeAI-4.2.5/invokeai/app/invocations/latents_to_image.py +107 -0
  18. InvokeAI-4.2.5/invokeai/app/invocations/resize_latents.py +103 -0
  19. InvokeAI-4.2.5/invokeai/app/invocations/scheduler.py +34 -0
  20. InvokeAI-4.2.5/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py +281 -0
  21. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/upscale.py +15 -22
  22. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/config/config_default.py +10 -1
  23. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/download/__init__.py +8 -1
  24. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/download/download_base.py +93 -28
  25. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/download/download_default.py +201 -64
  26. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/events/events_base.py +5 -1
  27. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/events/events_common.py +36 -0
  28. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_install/model_install_base.py +3 -4
  29. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_install/model_install_common.py +2 -8
  30. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_install/model_install_default.py +237 -204
  31. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_load/model_load_base.py +26 -2
  32. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_load/model_load_default.py +47 -1
  33. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_records/model_records_base.py +3 -5
  34. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_queue/session_queue_sqlite.py +8 -4
  35. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/invocation_context.py +88 -12
  36. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite/sqlite_util.py +2 -0
  37. InvokeAI-4.2.5/invokeai/app/services/shared/sqlite_migrator/migrations/migration_11.py +75 -0
  38. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/controlnet_utils.py +2 -2
  39. InvokeAI-4.2.5/invokeai/backend/image_util/depth_anything/__init__.py +90 -0
  40. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/dw_openpose/__init__.py +39 -8
  41. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/dw_openpose/utils.py +5 -3
  42. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/dw_openpose/wholebody.py +3 -24
  43. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/infill_methods/lama.py +16 -30
  44. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/realesrgan/realesrgan.py +2 -4
  45. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/ip_adapter/ip_adapter.py +7 -4
  46. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/lora.py +29 -22
  47. InvokeAI-4.2.5/invokeai/backend/model_hash/hash_validator.py +26 -0
  48. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/config.py +5 -2
  49. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/__init__.py +2 -1
  50. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/convert_cache/convert_cache_default.py +2 -0
  51. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/load_base.py +58 -4
  52. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/load_default.py +3 -6
  53. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_cache/model_cache_base.py +10 -1
  54. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +8 -8
  55. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_cache/model_locker.py +6 -9
  56. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py +3 -6
  57. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/vae.py +3 -8
  58. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/metadata/fetch/huggingface.py +1 -1
  59. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/metadata/metadata_base.py +4 -1
  60. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/probe.py +11 -3
  61. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_patcher.py +31 -32
  62. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/onnx/onnx_runtime.py +10 -0
  63. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/raw_model.py +16 -2
  64. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusers_pipeline.py +174 -178
  65. InvokeAI-4.2.5/invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py +170 -0
  66. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/textual_inversion.py +12 -0
  67. InvokeAI-4.2.5/invokeai/backend/util/silence_warnings.py +36 -0
  68. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/util.py +29 -21
  69. InvokeAI-4.2.5/invokeai/frontend/web/dist/assets/App-D-nTCJ_n.js +137 -0
  70. InvokeAI-4.2.4/invokeai/frontend/web/dist/assets/ThemeLocaleProvider-CQIRp5vD.js → InvokeAI-4.2.5/invokeai/frontend/web/dist/assets/ThemeLocaleProvider-C00Wxn4y.js +1 -1
  71. InvokeAI-4.2.5/invokeai/frontend/web/dist/assets/index--24GrIy3.js +510 -0
  72. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/index.html +1 -1
  73. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/invocation_api/__init__.py +2 -2
  74. InvokeAI-4.2.5/invokeai/version/invokeai_version.py +1 -0
  75. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/pyproject.toml +1 -1
  76. InvokeAI-4.2.4/invokeai/app/invocations/latent.py +0 -1501
  77. InvokeAI-4.2.4/invokeai/app/util/download_with_progress.py +0 -51
  78. InvokeAI-4.2.4/invokeai/backend/image_util/depth_anything/__init__.py +0 -110
  79. InvokeAI-4.2.4/invokeai/backend/util/silence_warnings.py +0 -29
  80. InvokeAI-4.2.4/invokeai/frontend/web/dist/assets/App-IufUCLxZ.js +0 -137
  81. InvokeAI-4.2.4/invokeai/frontend/web/dist/assets/index-CSQTzMJa.js +0 -510
  82. InvokeAI-4.2.4/invokeai/version/invokeai_version.py +0 -1
  83. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/dependency_links.txt +0 -0
  84. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/entry_points.txt +0 -0
  85. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/requires.txt +0 -0
  86. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/InvokeAI.egg-info/top_level.txt +0 -0
  87. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/LICENSE +0 -0
  88. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/LICENSE-SD1+SD2.txt +0 -0
  89. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/LICENSE-SDXL.txt +0 -0
  90. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/README.md +0 -0
  91. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/__init__.py +0 -0
  92. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/no_cache_staticfiles.py +0 -0
  93. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/app_info.py +0 -0
  94. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/board_images.py +0 -0
  95. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/boards.py +0 -0
  96. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/download_queue.py +0 -0
  97. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/images.py +0 -0
  98. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/session_queue.py +0 -0
  99. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/utilities.py +0 -0
  100. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/routers/workflows.py +0 -0
  101. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api/sockets.py +0 -0
  102. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/api_app.py +0 -0
  103. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/assets/images/caution.png +0 -0
  104. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/__init__.py +0 -0
  105. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/baseinvocation.py +0 -0
  106. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/collections.py +0 -0
  107. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/custom_nodes/README.md +0 -0
  108. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/custom_nodes/init.py +0 -0
  109. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/cv.py +0 -0
  110. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/facetools.py +0 -0
  111. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/fields.py +0 -0
  112. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/image.py +0 -0
  113. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/ip_adapter.py +0 -0
  114. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/mask.py +0 -0
  115. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/math.py +0 -0
  116. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/metadata.py +0 -0
  117. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/model.py +0 -0
  118. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/noise.py +0 -0
  119. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/param_easing.py +0 -0
  120. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/primitives.py +0 -0
  121. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/prompt.py +0 -0
  122. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/sdxl.py +0 -0
  123. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/strings.py +0 -0
  124. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/t2i_adapter.py +0 -0
  125. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/tiles.py +0 -0
  126. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/invocations/util.py +0 -0
  127. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/run_app.py +0 -0
  128. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/__init__.py +0 -0
  129. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_image_records/__init__.py +0 -0
  130. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_image_records/board_image_records_base.py +0 -0
  131. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_image_records/board_image_records_sqlite.py +0 -0
  132. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_images/__init__.py +0 -0
  133. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_images/board_images_base.py +0 -0
  134. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_images/board_images_common.py +0 -0
  135. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_images/board_images_default.py +0 -0
  136. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_records/board_records_base.py +0 -0
  137. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_records/board_records_common.py +0 -0
  138. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/board_records/board_records_sqlite.py +0 -0
  139. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/boards/__init__.py +0 -0
  140. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/boards/boards_base.py +0 -0
  141. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/boards/boards_common.py +0 -0
  142. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/boards/boards_default.py +0 -0
  143. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/bulk_download/__init__.py +0 -0
  144. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/bulk_download/bulk_download_base.py +0 -0
  145. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/bulk_download/bulk_download_common.py +0 -0
  146. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/bulk_download/bulk_download_default.py +0 -0
  147. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/config/__init__.py +0 -0
  148. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/config/config_common.py +0 -0
  149. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/events/__init__.py +0 -0
  150. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/events/events_fastapievents.py +0 -0
  151. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_files/__init__.py +0 -0
  152. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_files/image_files_base.py +0 -0
  153. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_files/image_files_common.py +0 -0
  154. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_files/image_files_disk.py +0 -0
  155. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_records/__init__.py +0 -0
  156. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_records/image_records_base.py +0 -0
  157. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_records/image_records_common.py +0 -0
  158. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/image_records/image_records_sqlite.py +0 -0
  159. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/images/__init__.py +0 -0
  160. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/images/images_base.py +0 -0
  161. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/images/images_common.py +0 -0
  162. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/images/images_default.py +0 -0
  163. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_cache/__init__.py +0 -0
  164. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_cache/invocation_cache_base.py +0 -0
  165. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_cache/invocation_cache_common.py +0 -0
  166. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_cache/invocation_cache_memory.py +0 -0
  167. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_services.py +0 -0
  168. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_stats/__init__.py +0 -0
  169. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_stats/invocation_stats_base.py +0 -0
  170. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_stats/invocation_stats_common.py +0 -0
  171. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invocation_stats/invocation_stats_default.py +0 -0
  172. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/invoker.py +0 -0
  173. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/item_storage/__init__.py +0 -0
  174. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/item_storage/item_storage_base.py +0 -0
  175. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/item_storage/item_storage_common.py +0 -0
  176. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/item_storage/item_storage_memory.py +0 -0
  177. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_images/model_images_base.py +0 -0
  178. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_images/model_images_common.py +0 -0
  179. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_images/model_images_default.py +0 -0
  180. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_install/__init__.py +0 -0
  181. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_load/__init__.py +0 -0
  182. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_manager/__init__.py +0 -0
  183. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_manager/model_manager_base.py +0 -0
  184. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_manager/model_manager_common.py +0 -0
  185. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_manager/model_manager_default.py +0 -0
  186. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_records/__init__.py +0 -0
  187. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/model_records/model_records_sql.py +0 -0
  188. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/names/__init__.py +0 -0
  189. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/names/names_base.py +0 -0
  190. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/names/names_common.py +0 -0
  191. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/names/names_default.py +0 -0
  192. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/object_serializer/object_serializer_base.py +0 -0
  193. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/object_serializer/object_serializer_common.py +0 -0
  194. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/object_serializer/object_serializer_disk.py +0 -0
  195. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/object_serializer/object_serializer_forward_cache.py +0 -0
  196. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_processor/__init__.py +0 -0
  197. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_processor/session_processor_base.py +0 -0
  198. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_processor/session_processor_common.py +0 -0
  199. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_processor/session_processor_default.py +0 -0
  200. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_queue/__init__.py +0 -0
  201. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_queue/session_queue_base.py +0 -0
  202. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/session_queue/session_queue_common.py +0 -0
  203. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/__init__.py +0 -0
  204. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/graph.py +0 -0
  205. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/pagination.py +0 -0
  206. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite/__init__.py +0 -0
  207. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite/sqlite_common.py +0 -0
  208. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite/sqlite_database.py +0 -0
  209. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/__init__.py +0 -0
  210. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/__init__.py +0 -0
  211. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_1.py +0 -0
  212. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_10.py +0 -0
  213. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py +0 -0
  214. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_3.py +0 -0
  215. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_4.py +0 -0
  216. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_5.py +0 -0
  217. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_6.py +0 -0
  218. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_7.py +0 -0
  219. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_8.py +0 -0
  220. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/migrations/migration_9.py +0 -0
  221. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_common.py +0 -0
  222. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_impl.py +0 -0
  223. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/urls/__init__.py +0 -0
  224. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/urls/urls_base.py +0 -0
  225. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/urls/urls_default.py +0 -0
  226. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/__init__.py +0 -0
  227. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/ESRGAN Upscaling with Canny ControlNet.json +0 -0
  228. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Face Detailer with IP-Adapter & Canny (See Note in Details).json +0 -0
  229. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Multi ControlNet (Canny & Depth).json +0 -0
  230. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Prompt from File.json +0 -0
  231. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Text to Image - SD1.5.json +0 -0
  232. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Text to Image - SDXL.json +0 -0
  233. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Text to Image with LoRA.json +0 -0
  234. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/default_workflows/Tiled Upscaling (Beta).json +0 -0
  235. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/workflow_records_base.py +0 -0
  236. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/workflow_records_common.py +0 -0
  237. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/services/workflow_records/workflow_records_sqlite.py +0 -0
  238. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/shared/__init__.py +0 -0
  239. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/shared/models.py +0 -0
  240. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/__init__.py +0 -0
  241. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/custom_openapi.py +0 -0
  242. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/metaenum.py +0 -0
  243. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/misc.py +0 -0
  244. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/model_exclude_null.py +0 -0
  245. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/profiler.py +0 -0
  246. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/step_callback.py +0 -0
  247. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/suppress_output.py +0 -0
  248. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/thumbnails.py +0 -0
  249. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/app/util/ti_utils.py +0 -0
  250. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/assets/fonts/inter/Inter-Regular.ttf +0 -0
  251. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/__init__.py +0 -0
  252. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/__init__.py +0 -0
  253. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/basicsr/__init__.py +0 -0
  254. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/basicsr/arch_util.py +0 -0
  255. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/basicsr/rrdbnet_arch.py +0 -0
  256. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/canny.py +0 -0
  257. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/depth_anything/model/blocks.py +0 -0
  258. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/depth_anything/model/dpt.py +0 -0
  259. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/depth_anything/utilities/util.py +0 -0
  260. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/dw_openpose/onnxdet.py +0 -0
  261. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/dw_openpose/onnxpose.py +0 -0
  262. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/hed.py +0 -0
  263. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/infill_methods/cv2_inpaint.py +0 -0
  264. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/infill_methods/mosaic.py +0 -0
  265. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/infill_methods/patchmatch.py +0 -0
  266. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/infill_methods/tile.py +0 -0
  267. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/invisible_watermark.py +0 -0
  268. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/lineart.py +0 -0
  269. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/lineart_anime.py +0 -0
  270. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/pngwriter.py +0 -0
  271. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/realesrgan/__init__.py +0 -0
  272. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/safety_checker.py +0 -0
  273. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/image_util/util.py +0 -0
  274. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/ip_adapter/__init__.py +0 -0
  275. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/ip_adapter/ip_attention_weights.py +0 -0
  276. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/ip_adapter/resampler.py +0 -0
  277. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_hash/model_hash.py +0 -0
  278. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/__init__.py +0 -0
  279. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py +0 -0
  280. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/libc_util.py +0 -0
  281. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/convert_cache/__init__.py +0 -0
  282. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/convert_cache/convert_cache_base.py +0 -0
  283. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/memory_snapshot.py +0 -0
  284. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_cache/__init__.py +0 -0
  285. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loader_registry.py +0 -0
  286. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/__init__.py +0 -0
  287. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/controlnet.py +0 -0
  288. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/ip_adapter.py +0 -0
  289. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/lora.py +0 -0
  290. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/onnx.py +0 -0
  291. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/stable_diffusion.py +0 -0
  292. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_loaders/textual_inversion.py +0 -0
  293. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/model_util.py +0 -0
  294. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/load/optimizations.py +0 -0
  295. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/merge.py +0 -0
  296. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/metadata/__init__.py +0 -0
  297. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/metadata/fetch/__init__.py +0 -0
  298. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/metadata/fetch/fetch_base.py +0 -0
  299. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/search.py +0 -0
  300. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/starter_models.py +0 -0
  301. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/util/libc_util.py +0 -0
  302. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/util/model_util.py +0 -0
  303. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/model_manager/util/select_hf_files.py +0 -0
  304. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/__init__.py +0 -0
  305. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/__init__.py +0 -0
  306. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py +0 -0
  307. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/custom_atttention.py +0 -0
  308. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/regional_ip_data.py +0 -0
  309. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/regional_prompt_data.py +0 -0
  310. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +0 -0
  311. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/diffusion/unet_attention_patcher.py +0 -0
  312. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/schedulers/__init__.py +0 -0
  313. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/schedulers/schedulers.py +0 -0
  314. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/stable_diffusion/seamless.py +0 -0
  315. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/tiles/__init__.py +0 -0
  316. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/tiles/tiles.py +0 -0
  317. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/tiles/utils.py +0 -0
  318. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/__init__.py +0 -0
  319. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/attention.py +0 -0
  320. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/catch_sigint.py +0 -0
  321. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/db_maintenance.py +0 -0
  322. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/devices.py +0 -0
  323. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/hotfixes.py +0 -0
  324. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/logging.py +0 -0
  325. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/mask.py +0 -0
  326. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/mps_fixes.py +0 -0
  327. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/backend/util/test_utils.py +0 -0
  328. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/controlnet/cldm_v15.yaml +0 -0
  329. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/controlnet/cldm_v21.yaml +0 -0
  330. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/sd_xl_base.yaml +0 -0
  331. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml +0 -0
  332. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/sd_xl_refiner.yaml +0 -0
  333. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-finetune.yaml +0 -0
  334. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-finetune_style.yaml +0 -0
  335. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-inference-v.yaml +0 -0
  336. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-inference.yaml +0 -0
  337. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-inpainting-inference.yaml +0 -0
  338. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v1-m1-finetune.yaml +0 -0
  339. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v2-inference-v.yaml +0 -0
  340. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v2-inference.yaml +0 -0
  341. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v2-inpainting-inference-v.yaml +0 -0
  342. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v2-inpainting-inference.yaml +0 -0
  343. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/configs/stable-diffusion/v2-midas-inference.yaml +0 -0
  344. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/__init__.py +0 -0
  345. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/cli/__init__.py +0 -0
  346. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/cli/arg_parser.py +0 -0
  347. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/install/__init__.py +0 -0
  348. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/install/import_images.py +0 -0
  349. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/__init__.py +0 -0
  350. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/App-DEu4J2pT.css +0 -0
  351. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/ThemeLocaleProvider-DzjsLZSc.css +0 -0
  352. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-alert-favicon.svg +0 -0
  353. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-avatar-circle.svg +0 -0
  354. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-avatar-square.svg +0 -0
  355. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-favicon.png +0 -0
  356. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-favicon.svg +0 -0
  357. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-key-char-lrg.svg +0 -0
  358. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-key-char-sml.svg +0 -0
  359. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-key-wht-lrg.svg +0 -0
  360. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-key-wht-sml.svg +0 -0
  361. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-symbol-char-lrg.svg +0 -0
  362. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-symbol-char-sml.svg +0 -0
  363. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-symbol-wht-lrg.svg +0 -0
  364. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-symbol-wht-sml.svg +0 -0
  365. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-symbol-ylw-lrg.svg +0 -0
  366. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-tag-char-lrg.svg +0 -0
  367. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-tag-char-sml.svg +0 -0
  368. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-tag-lrg.svg +0 -0
  369. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-tag-sml.svg +0 -0
  370. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-wordmark-charcoal.svg +0 -0
  371. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/invoke-wordmark-white.svg +0 -0
  372. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/mask.svg +0 -0
  373. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/images/transparent_bg.png +0 -0
  374. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-DIEz8p5i.woff2 +0 -0
  375. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-BmJJXa8e.woff2 +0 -0
  376. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-D5AYLNiq.woff2 +0 -0
  377. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-greek-wght-normal-DyIDNIyN.woff2 +0 -0
  378. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-latin-ext-wght-normal-CN1pIXkb.woff2 +0 -0
  379. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-latin-wght-normal-BgVq2Tq4.woff2 +0 -0
  380. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/assets/inter-vietnamese-wght-normal-_GQuwPVU.woff2 +0 -0
  381. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/ar.json +0 -0
  382. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/az.json +0 -0
  383. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/bg.json +0 -0
  384. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/de.json +0 -0
  385. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/en.json +0 -0
  386. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/es.json +0 -0
  387. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/fi.json +0 -0
  388. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/fr.json +0 -0
  389. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/he.json +0 -0
  390. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/hu.json +0 -0
  391. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/it.json +0 -0
  392. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/ja.json +0 -0
  393. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/ko.json +0 -0
  394. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/mn.json +0 -0
  395. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/nl.json +0 -0
  396. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/pl.json +0 -0
  397. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/pt.json +0 -0
  398. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/pt_BR.json +0 -0
  399. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/ro.json +0 -0
  400. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/ru.json +0 -0
  401. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/sv.json +0 -0
  402. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/tr.json +0 -0
  403. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/uk.json +0 -0
  404. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/vi.json +0 -0
  405. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/zh_CN.json +0 -0
  406. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/dist/locales/zh_Hant.json +0 -0
  407. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/scripts/clean_translations.py +0 -0
  408. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/frontend/web/static/docs/invoke-favicon-docs.svg +0 -0
  409. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/invokeai/version/__init__.py +0 -0
  410. {InvokeAI-4.2.4 → InvokeAI-4.2.5}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: InvokeAI
3
- Version: 4.2.4
3
+ Version: 4.2.5
4
4
  Summary: An implementation of Stable Diffusion which provides various new features and options to aid the image generation process
5
5
  Author-email: The InvokeAI Project <lincoln.stein@gmail.com>
6
6
  License: Apache License
@@ -27,17 +27,24 @@ invokeai/app/api/routers/workflows.py
27
27
  invokeai/app/assets/images/caution.png
28
28
  invokeai/app/invocations/__init__.py
29
29
  invokeai/app/invocations/baseinvocation.py
30
+ invokeai/app/invocations/blend_latents.py
30
31
  invokeai/app/invocations/collections.py
31
32
  invokeai/app/invocations/compel.py
32
33
  invokeai/app/invocations/constants.py
33
34
  invokeai/app/invocations/controlnet_image_processors.py
35
+ invokeai/app/invocations/create_denoise_mask.py
36
+ invokeai/app/invocations/create_gradient_mask.py
37
+ invokeai/app/invocations/crop_latents.py
34
38
  invokeai/app/invocations/cv.py
39
+ invokeai/app/invocations/denoise_latents.py
35
40
  invokeai/app/invocations/facetools.py
36
41
  invokeai/app/invocations/fields.py
42
+ invokeai/app/invocations/ideal_size.py
37
43
  invokeai/app/invocations/image.py
44
+ invokeai/app/invocations/image_to_latents.py
38
45
  invokeai/app/invocations/infill.py
39
46
  invokeai/app/invocations/ip_adapter.py
40
- invokeai/app/invocations/latent.py
47
+ invokeai/app/invocations/latents_to_image.py
41
48
  invokeai/app/invocations/mask.py
42
49
  invokeai/app/invocations/math.py
43
50
  invokeai/app/invocations/metadata.py
@@ -46,9 +53,12 @@ invokeai/app/invocations/noise.py
46
53
  invokeai/app/invocations/param_easing.py
47
54
  invokeai/app/invocations/primitives.py
48
55
  invokeai/app/invocations/prompt.py
56
+ invokeai/app/invocations/resize_latents.py
57
+ invokeai/app/invocations/scheduler.py
49
58
  invokeai/app/invocations/sdxl.py
50
59
  invokeai/app/invocations/strings.py
51
60
  invokeai/app/invocations/t2i_adapter.py
61
+ invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
52
62
  invokeai/app/invocations/tiles.py
53
63
  invokeai/app/invocations/upscale.py
54
64
  invokeai/app/invocations/util.py
@@ -156,6 +166,7 @@ invokeai/app/services/shared/sqlite_migrator/sqlite_migrator_impl.py
156
166
  invokeai/app/services/shared/sqlite_migrator/migrations/__init__.py
157
167
  invokeai/app/services/shared/sqlite_migrator/migrations/migration_1.py
158
168
  invokeai/app/services/shared/sqlite_migrator/migrations/migration_10.py
169
+ invokeai/app/services/shared/sqlite_migrator/migrations/migration_11.py
159
170
  invokeai/app/services/shared/sqlite_migrator/migrations/migration_2.py
160
171
  invokeai/app/services/shared/sqlite_migrator/migrations/migration_3.py
161
172
  invokeai/app/services/shared/sqlite_migrator/migrations/migration_4.py
@@ -184,7 +195,6 @@ invokeai/app/shared/models.py
184
195
  invokeai/app/util/__init__.py
185
196
  invokeai/app/util/controlnet_utils.py
186
197
  invokeai/app/util/custom_openapi.py
187
- invokeai/app/util/download_with_progress.py
188
198
  invokeai/app/util/metaenum.py
189
199
  invokeai/app/util/misc.py
190
200
  invokeai/app/util/model_exclude_null.py
@@ -231,6 +241,7 @@ invokeai/backend/ip_adapter/__init__.py
231
241
  invokeai/backend/ip_adapter/ip_adapter.py
232
242
  invokeai/backend/ip_adapter/ip_attention_weights.py
233
243
  invokeai/backend/ip_adapter/resampler.py
244
+ invokeai/backend/model_hash/hash_validator.py
234
245
  invokeai/backend/model_hash/model_hash.py
235
246
  invokeai/backend/model_manager/__init__.py
236
247
  invokeai/backend/model_manager/config.py
@@ -274,6 +285,7 @@ invokeai/backend/model_manager/util/select_hf_files.py
274
285
  invokeai/backend/onnx/onnx_runtime.py
275
286
  invokeai/backend/stable_diffusion/__init__.py
276
287
  invokeai/backend/stable_diffusion/diffusers_pipeline.py
288
+ invokeai/backend/stable_diffusion/multi_diffusion_pipeline.py
277
289
  invokeai/backend/stable_diffusion/seamless.py
278
290
  invokeai/backend/stable_diffusion/diffusion/__init__.py
279
291
  invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
@@ -322,11 +334,11 @@ invokeai/frontend/install/__init__.py
322
334
  invokeai/frontend/install/import_images.py
323
335
  invokeai/frontend/web/__init__.py
324
336
  invokeai/frontend/web/dist/index.html
337
+ invokeai/frontend/web/dist/assets/App-D-nTCJ_n.js
325
338
  invokeai/frontend/web/dist/assets/App-DEu4J2pT.css
326
- invokeai/frontend/web/dist/assets/App-IufUCLxZ.js
327
- invokeai/frontend/web/dist/assets/ThemeLocaleProvider-CQIRp5vD.js
339
+ invokeai/frontend/web/dist/assets/ThemeLocaleProvider-C00Wxn4y.js
328
340
  invokeai/frontend/web/dist/assets/ThemeLocaleProvider-DzjsLZSc.css
329
- invokeai/frontend/web/dist/assets/index-CSQTzMJa.js
341
+ invokeai/frontend/web/dist/assets/index--24GrIy3.js
330
342
  invokeai/frontend/web/dist/assets/inter-cyrillic-ext-wght-normal-DIEz8p5i.woff2
331
343
  invokeai/frontend/web/dist/assets/inter-cyrillic-wght-normal-BmJJXa8e.woff2
332
344
  invokeai/frontend/web/dist/assets/inter-greek-ext-wght-normal-D5AYLNiq.woff2
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: InvokeAI
3
- Version: 4.2.4
3
+ Version: 4.2.5
4
4
  Summary: An implementation of Stable Diffusion which provides various new features and options to aid the image generation process
5
5
  Author-email: The InvokeAI Project <lincoln.stein@gmail.com>
6
6
  License: Apache License
@@ -93,7 +93,7 @@ class ApiDependencies:
93
93
  conditioning = ObjectSerializerForwardCache(
94
94
  ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True)
95
95
  )
96
- download_queue_service = DownloadQueueService(event_bus=events)
96
+ download_queue_service = DownloadQueueService(app_config=configuration, event_bus=events)
97
97
  model_images_service = ModelImageFileStorageDisk(model_images_folder / "model_images")
98
98
  model_manager = ModelManagerService.build_model_manager(
99
99
  app_config=configuration,
@@ -9,7 +9,7 @@ from copy import deepcopy
9
9
  from typing import Any, Dict, List, Optional, Type
10
10
 
11
11
  from fastapi import Body, Path, Query, Response, UploadFile
12
- from fastapi.responses import FileResponse
12
+ from fastapi.responses import FileResponse, HTMLResponse
13
13
  from fastapi.routing import APIRouter
14
14
  from PIL import Image
15
15
  from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
@@ -502,6 +502,133 @@ async def install_model(
502
502
  return result
503
503
 
504
504
 
505
+ @model_manager_router.get(
506
+ "/install/huggingface",
507
+ operation_id="install_hugging_face_model",
508
+ responses={
509
+ 201: {"description": "The model is being installed"},
510
+ 400: {"description": "Bad request"},
511
+ 409: {"description": "There is already a model corresponding to this path or repo_id"},
512
+ },
513
+ status_code=201,
514
+ response_class=HTMLResponse,
515
+ )
516
+ async def install_hugging_face_model(
517
+ source: str = Query(description="HuggingFace repo_id to install"),
518
+ ) -> HTMLResponse:
519
+ """Install a Hugging Face model using a string identifier."""
520
+
521
+ def generate_html(title: str, heading: str, repo_id: str, is_error: bool, message: str | None = "") -> str:
522
+ if message:
523
+ message = f"<p>{message}</p>"
524
+ title_class = "error" if is_error else "success"
525
+ return f"""
526
+ <html>
527
+
528
+ <head>
529
+ <title>{title}</title>
530
+ <style>
531
+ body {{
532
+ text-align: center;
533
+ background-color: hsl(220 12% 10% / 1);
534
+ font-family: Helvetica, sans-serif;
535
+ color: hsl(220 12% 86% / 1);
536
+ }}
537
+
538
+ .repo-id {{
539
+ color: hsl(220 12% 68% / 1);
540
+ }}
541
+
542
+ .error {{
543
+ color: hsl(0 42% 68% / 1)
544
+ }}
545
+
546
+ .message-box {{
547
+ display: inline-block;
548
+ border-radius: 5px;
549
+ background-color: hsl(220 12% 20% / 1);
550
+ padding-inline-end: 30px;
551
+ padding: 20px;
552
+ padding-inline-start: 30px;
553
+ padding-inline-end: 30px;
554
+ }}
555
+
556
+ .container {{
557
+ display: flex;
558
+ width: 100%;
559
+ height: 100%;
560
+ align-items: center;
561
+ justify-content: center;
562
+ }}
563
+
564
+ a {{
565
+ color: inherit
566
+ }}
567
+
568
+ a:visited {{
569
+ color: inherit
570
+ }}
571
+
572
+ a:active {{
573
+ color: inherit
574
+ }}
575
+ </style>
576
+ </head>
577
+
578
+ <body style="background-color: hsl(220 12% 10% / 1);">
579
+ <div class="container">
580
+ <div class="message-box">
581
+ <h2 class="{title_class}">{heading}</h2>
582
+ {message}
583
+ <p class="repo-id">Repo ID: {repo_id}</p>
584
+ </div>
585
+ </div>
586
+ </body>
587
+
588
+ </html>
589
+ """
590
+
591
+ try:
592
+ metadata = HuggingFaceMetadataFetch().from_id(source)
593
+ assert isinstance(metadata, ModelMetadataWithFiles)
594
+ except UnknownMetadataException:
595
+ title = "Unable to Install Model"
596
+ heading = "No HuggingFace repository found with that repo ID."
597
+ message = "Ensure the repo ID is correct and try again."
598
+ return HTMLResponse(content=generate_html(title, heading, source, True, message), status_code=400)
599
+
600
+ logger = ApiDependencies.invoker.services.logger
601
+
602
+ try:
603
+ installer = ApiDependencies.invoker.services.model_manager.install
604
+ if metadata.is_diffusers:
605
+ installer.heuristic_import(
606
+ source=source,
607
+ inplace=False,
608
+ )
609
+ elif metadata.ckpt_urls is not None and len(metadata.ckpt_urls) == 1:
610
+ installer.heuristic_import(
611
+ source=str(metadata.ckpt_urls[0]),
612
+ inplace=False,
613
+ )
614
+ else:
615
+ title = "Unable to Install Model"
616
+ heading = "This HuggingFace repo has multiple models."
617
+ message = "Please use the Model Manager to install this model."
618
+ return HTMLResponse(content=generate_html(title, heading, source, True, message), status_code=200)
619
+
620
+ title = "Model Install Started"
621
+ heading = "Your HuggingFace model is installing now."
622
+ message = "You can close this tab and check the Model Manager for installation progress."
623
+ return HTMLResponse(content=generate_html(title, heading, source, False, message), status_code=201)
624
+ except Exception as e:
625
+ logger.error(str(e))
626
+ title = "Unable to Install Model"
627
+ heading = "There was an problem installing this model."
628
+ message = 'Please use the Model Manager directly to install this model. If the issue persists, ask for help on <a href="https://discord.gg/ZmtBAhwWhy">discord</a>.'
629
+ return HTMLResponse(content=generate_html(title, heading, source, True, message), status_code=500)
630
+
631
+
505
632
  @model_manager_router.get(
506
633
  "/install",
507
634
  operation_id="list_model_installs",
@@ -0,0 +1,98 @@
1
+ from typing import Any, Union
2
+
3
+ import numpy as np
4
+ import numpy.typing as npt
5
+ import torch
6
+
7
+ from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
8
+ from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, LatentsField
9
+ from invokeai.app.invocations.primitives import LatentsOutput
10
+ from invokeai.app.services.shared.invocation_context import InvocationContext
11
+ from invokeai.backend.util.devices import TorchDevice
12
+
13
+
14
+ @invocation(
15
+ "lblend",
16
+ title="Blend Latents",
17
+ tags=["latents", "blend"],
18
+ category="latents",
19
+ version="1.0.3",
20
+ )
21
+ class BlendLatentsInvocation(BaseInvocation):
22
+ """Blend two latents using a given alpha. Latents must have same size."""
23
+
24
+ latents_a: LatentsField = InputField(
25
+ description=FieldDescriptions.latents,
26
+ input=Input.Connection,
27
+ )
28
+ latents_b: LatentsField = InputField(
29
+ description=FieldDescriptions.latents,
30
+ input=Input.Connection,
31
+ )
32
+ alpha: float = InputField(default=0.5, description=FieldDescriptions.blend_alpha)
33
+
34
+ def invoke(self, context: InvocationContext) -> LatentsOutput:
35
+ latents_a = context.tensors.load(self.latents_a.latents_name)
36
+ latents_b = context.tensors.load(self.latents_b.latents_name)
37
+
38
+ if latents_a.shape != latents_b.shape:
39
+ raise Exception("Latents to blend must be the same size.")
40
+
41
+ device = TorchDevice.choose_torch_device()
42
+
43
+ def slerp(
44
+ t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here?
45
+ v0: Union[torch.Tensor, npt.NDArray[Any]],
46
+ v1: Union[torch.Tensor, npt.NDArray[Any]],
47
+ DOT_THRESHOLD: float = 0.9995,
48
+ ) -> Union[torch.Tensor, npt.NDArray[Any]]:
49
+ """
50
+ Spherical linear interpolation
51
+ Args:
52
+ t (float/np.ndarray): Float value between 0.0 and 1.0
53
+ v0 (np.ndarray): Starting vector
54
+ v1 (np.ndarray): Final vector
55
+ DOT_THRESHOLD (float): Threshold for considering the two vectors as
56
+ colineal. Not recommended to alter this.
57
+ Returns:
58
+ v2 (np.ndarray): Interpolation vector between v0 and v1
59
+ """
60
+ inputs_are_torch = False
61
+ if not isinstance(v0, np.ndarray):
62
+ inputs_are_torch = True
63
+ v0 = v0.detach().cpu().numpy()
64
+ if not isinstance(v1, np.ndarray):
65
+ inputs_are_torch = True
66
+ v1 = v1.detach().cpu().numpy()
67
+
68
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
69
+ if np.abs(dot) > DOT_THRESHOLD:
70
+ v2 = (1 - t) * v0 + t * v1
71
+ else:
72
+ theta_0 = np.arccos(dot)
73
+ sin_theta_0 = np.sin(theta_0)
74
+ theta_t = theta_0 * t
75
+ sin_theta_t = np.sin(theta_t)
76
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
77
+ s1 = sin_theta_t / sin_theta_0
78
+ v2 = s0 * v0 + s1 * v1
79
+
80
+ if inputs_are_torch:
81
+ v2_torch: torch.Tensor = torch.from_numpy(v2).to(device)
82
+ return v2_torch
83
+ else:
84
+ assert isinstance(v2, np.ndarray)
85
+ return v2
86
+
87
+ # blend
88
+ bl = slerp(self.alpha, latents_a, latents_b)
89
+ assert isinstance(bl, torch.Tensor)
90
+ blended_latents: torch.Tensor = bl # for type checking convenience
91
+
92
+ # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
93
+ blended_latents = blended_latents.to("cpu")
94
+
95
+ TorchDevice.empty_cache()
96
+
97
+ name = context.tensors.save(tensor=blended_latents)
98
+ return LatentsOutput.build(latents_name=name, latents=blended_latents, seed=self.latents_a.seed)
@@ -81,9 +81,13 @@ class CompelInvocation(BaseInvocation):
81
81
 
82
82
  with (
83
83
  # apply all patches while the model is on the target device
84
- text_encoder_info as text_encoder,
84
+ text_encoder_info.model_on_device() as (model_state_dict, text_encoder),
85
85
  tokenizer_info as tokenizer,
86
- ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
86
+ ModelPatcher.apply_lora_text_encoder(
87
+ text_encoder,
88
+ loras=_lora_loader(),
89
+ model_state_dict=model_state_dict,
90
+ ),
87
91
  # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
88
92
  ModelPatcher.apply_clip_skip(text_encoder, self.clip.skipped_layers),
89
93
  ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (
@@ -172,9 +176,14 @@ class SDXLPromptInvocationBase:
172
176
 
173
177
  with (
174
178
  # apply all patches while the model is on the target device
175
- text_encoder_info as text_encoder,
179
+ text_encoder_info.model_on_device() as (state_dict, text_encoder),
176
180
  tokenizer_info as tokenizer,
177
- ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
181
+ ModelPatcher.apply_lora(
182
+ text_encoder,
183
+ loras=_lora_loader(),
184
+ prefix=lora_prefix,
185
+ model_state_dict=state_dict,
186
+ ),
178
187
  # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
179
188
  ModelPatcher.apply_clip_skip(text_encoder, clip_field.skipped_layers),
180
189
  ModelPatcher.apply_ti(tokenizer, text_encoder, ti_list) as (
@@ -1,6 +1,7 @@
1
1
  from typing import Literal
2
2
 
3
3
  from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
4
+ from invokeai.backend.util.devices import TorchDevice
4
5
 
5
6
  LATENT_SCALE_FACTOR = 8
6
7
  """
@@ -15,3 +16,5 @@ SCHEDULER_NAME_VALUES = Literal[tuple(SCHEDULER_MAP.keys())]
15
16
 
16
17
  IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
17
18
  """A literal type for PIL image modes supported by Invoke"""
19
+
20
+ DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
@@ -2,6 +2,7 @@
2
2
  # initial implementation by Gregg Helt, 2023
3
3
  # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux
4
4
  from builtins import bool, float
5
+ from pathlib import Path
5
6
  from typing import Dict, List, Literal, Union
6
7
 
7
8
  import cv2
@@ -36,12 +37,13 @@ from invokeai.app.invocations.util import validate_begin_end_step, validate_weig
36
37
  from invokeai.app.services.shared.invocation_context import InvocationContext
37
38
  from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
38
39
  from invokeai.backend.image_util.canny import get_canny_edges
39
- from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
40
- from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
40
+ from invokeai.backend.image_util.depth_anything import DEPTH_ANYTHING_MODELS, DepthAnythingDetector
41
+ from invokeai.backend.image_util.dw_openpose import DWPOSE_MODELS, DWOpenposeDetector
41
42
  from invokeai.backend.image_util.hed import HEDProcessor
42
43
  from invokeai.backend.image_util.lineart import LineartProcessor
43
44
  from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
44
45
  from invokeai.backend.image_util.util import np_to_pil, pil_to_np
46
+ from invokeai.backend.util.devices import TorchDevice
45
47
 
46
48
  from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
47
49
 
@@ -139,6 +141,7 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
139
141
  return context.images.get_pil(self.image.image_name, "RGB")
140
142
 
141
143
  def invoke(self, context: InvocationContext) -> ImageOutput:
144
+ self._context = context
142
145
  raw_image = self.load_image(context)
143
146
  # image type should be PIL.PngImagePlugin.PngImageFile ?
144
147
  processed_image = self.run_processor(raw_image)
@@ -284,7 +287,8 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
284
287
  # depth_and_normal not supported in controlnet_aux v0.0.3
285
288
  # depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
286
289
 
287
- def run_processor(self, image):
290
+ def run_processor(self, image: Image.Image) -> Image.Image:
291
+ # TODO: replace from_pretrained() calls with context.models.download_and_cache() (or similar)
288
292
  midas_processor = MidasDetector.from_pretrained("lllyasviel/Annotators")
289
293
  processed_image = midas_processor(
290
294
  image,
@@ -311,7 +315,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
311
315
  detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
312
316
  image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
313
317
 
314
- def run_processor(self, image):
318
+ def run_processor(self, image: Image.Image) -> Image.Image:
315
319
  normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
316
320
  processed_image = normalbae_processor(
317
321
  image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution
@@ -330,7 +334,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
330
334
  thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
331
335
  thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
332
336
 
333
- def run_processor(self, image):
337
+ def run_processor(self, image: Image.Image) -> Image.Image:
334
338
  mlsd_processor = MLSDdetector.from_pretrained("lllyasviel/Annotators")
335
339
  processed_image = mlsd_processor(
336
340
  image,
@@ -353,7 +357,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
353
357
  safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
354
358
  scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
355
359
 
356
- def run_processor(self, image):
360
+ def run_processor(self, image: Image.Image) -> Image.Image:
357
361
  pidi_processor = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
358
362
  processed_image = pidi_processor(
359
363
  image,
@@ -381,7 +385,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
381
385
  w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
382
386
  f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
383
387
 
384
- def run_processor(self, image):
388
+ def run_processor(self, image: Image.Image) -> Image.Image:
385
389
  content_shuffle_processor = ContentShuffleDetector()
386
390
  processed_image = content_shuffle_processor(
387
391
  image,
@@ -405,7 +409,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
405
409
  class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
406
410
  """Applies Zoe depth processing to image"""
407
411
 
408
- def run_processor(self, image):
412
+ def run_processor(self, image: Image.Image) -> Image.Image:
409
413
  zoe_depth_processor = ZoeDetector.from_pretrained("lllyasviel/Annotators")
410
414
  processed_image = zoe_depth_processor(image)
411
415
  return processed_image
@@ -426,7 +430,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
426
430
  detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
427
431
  image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
428
432
 
429
- def run_processor(self, image):
433
+ def run_processor(self, image: Image.Image) -> Image.Image:
430
434
  mediapipe_face_processor = MediapipeFaceDetector()
431
435
  processed_image = mediapipe_face_processor(
432
436
  image,
@@ -454,7 +458,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
454
458
  detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
455
459
  image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
456
460
 
457
- def run_processor(self, image):
461
+ def run_processor(self, image: Image.Image) -> Image.Image:
458
462
  leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
459
463
  processed_image = leres_processor(
460
464
  image,
@@ -496,8 +500,8 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
496
500
  np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA)
497
501
  return np_img
498
502
 
499
- def run_processor(self, img):
500
- np_img = np.array(img, dtype=np.uint8)
503
+ def run_processor(self, image: Image.Image) -> Image.Image:
504
+ np_img = np.array(image, dtype=np.uint8)
501
505
  processed_np_image = self.tile_resample(
502
506
  np_img,
503
507
  # res=self.tile_size,
@@ -520,7 +524,7 @@ class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
520
524
  detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
521
525
  image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
522
526
 
523
- def run_processor(self, image):
527
+ def run_processor(self, image: Image.Image) -> Image.Image:
524
528
  # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
525
529
  segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
526
530
  "ybelkada/segment-anything", subfolder="checkpoints"
@@ -566,7 +570,7 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
566
570
 
567
571
  color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
568
572
 
569
- def run_processor(self, image: Image.Image):
573
+ def run_processor(self, image: Image.Image) -> Image.Image:
570
574
  np_image = np.array(image, dtype=np.uint8)
571
575
  height, width = np_image.shape[:2]
572
576
 
@@ -601,12 +605,18 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
601
605
  )
602
606
  resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
603
607
 
604
- def run_processor(self, image: Image.Image):
605
- depth_anything_detector = DepthAnythingDetector()
606
- depth_anything_detector.load_model(model_size=self.model_size)
608
    def run_processor(self, image: Image.Image) -> Image.Image:
        """Run Depth Anything depth estimation on `image` and return the result as a PIL image."""

        # Loader callback handed to the model cache: given the local checkpoint
        # path, construct the detector model on the chosen torch device.
        def loader(model_path: Path):
            return DepthAnythingDetector.load_model(
                model_path, model_size=self.model_size, device=TorchDevice.choose_torch_device()
            )

        # NOTE(review): load_remote_model presumably downloads/caches the
        # checkpoint for the selected model size and calls `loader` to build the
        # model — confirm against the model-manager service.
        with self._context.models.load_remote_model(
            source=DEPTH_ANYTHING_MODELS[self.model_size], loader=loader
        ) as model:
            depth_anything_detector = DepthAnythingDetector(model, TorchDevice.choose_torch_device())
            processed_image = depth_anything_detector(image=image, resolution=self.resolution)
            return processed_image
610
620
 
611
621
 
612
622
  @invocation(
@@ -624,8 +634,11 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
624
634
  draw_hands: bool = InputField(default=False)
625
635
  image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
626
636
 
627
- def run_processor(self, image: Image.Image):
628
- dw_openpose = DWOpenposeDetector()
637
+ def run_processor(self, image: Image.Image) -> Image.Image:
638
+ onnx_det = self._context.models.download_and_cache_model(DWPOSE_MODELS["yolox_l.onnx"])
639
+ onnx_pose = self._context.models.download_and_cache_model(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"])
640
+
641
+ dw_openpose = DWOpenposeDetector(onnx_det=onnx_det, onnx_pose=onnx_pose)
629
642
  processed_image = dw_openpose(
630
643
  image,
631
644
  draw_face=self.draw_face,
@@ -0,0 +1,80 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ import torchvision.transforms as T
5
+ from PIL import Image
6
+ from torchvision.transforms.functional import resize as tv_resize
7
+
8
+ from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
9
+ from invokeai.app.invocations.constants import DEFAULT_PRECISION
10
+ from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
11
+ from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
12
+ from invokeai.app.invocations.model import VAEField
13
+ from invokeai.app.invocations.primitives import DenoiseMaskOutput
14
+ from invokeai.app.services.shared.invocation_context import InvocationContext
15
+ from invokeai.backend.stable_diffusion.diffusers_pipeline import image_resized_to_grid_as_tensor
16
+
17
+
18
@invocation(
    "create_denoise_mask",
    title="Create Denoise Mask",
    tags=["mask", "denoise"],
    category="latents",
    version="1.0.2",
)
class CreateDenoiseMaskInvocation(BaseInvocation):
    """Creates mask for denoising model run.

    Produces a DenoiseMaskOutput referencing the saved mask tensor and, when a
    source image is supplied, the VAE-encoded latents of the masked image.
    """

    # Connection-only VAE, used to encode the masked image into latents.
    vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
    # Optional source image; when omitted, only the mask tensor is produced.
    image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
    mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
    tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
    fp32: bool = InputField(
        default=DEFAULT_PRECISION == torch.float32,
        description=FieldDescriptions.fp32,
        ui_order=4,
    )

    def prep_mask_tensor(self, mask_image: Image.Image) -> torch.Tensor:
        """Convert a PIL mask image into a batched float mask tensor.

        The image is forced to single-channel ("L") mode, converted without
        normalization, and given a batch dimension when the helper returns a
        3-D tensor.
        """
        if mask_image.mode != "L":
            mask_image = mask_image.convert("L")
        # NOTE(review): image_resized_to_grid_as_tensor presumably snaps H/W to
        # the latent grid multiple — confirm against its definition.
        mask_tensor: torch.Tensor = image_resized_to_grid_as_tensor(mask_image, normalize=False)
        if mask_tensor.dim() == 3:
            mask_tensor = mask_tensor.unsqueeze(0)
        # if shape is not None:
        #     mask_tensor = tv_resize(mask_tensor, shape, T.InterpolationMode.BILINEAR)
        return mask_tensor

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> DenoiseMaskOutput:
        """Build the denoise mask and, if an image was given, masked-image latents.

        Returns a DenoiseMaskOutput whose masked_latents_name is None when no
        source image was provided.
        """
        if self.image is not None:
            image = context.images.get_pil(self.image.image_name)
            image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
            if image_tensor.dim() == 3:
                image_tensor = image_tensor.unsqueeze(0)  # add batch dimension
        else:
            image_tensor = None

        mask = self.prep_mask_tensor(
            context.images.get_pil(self.mask.image_name),
        )

        if image_tensor is not None:
            vae_info = context.models.load(self.vae.vae)

            # Resize the mask to the image's spatial size, then zero out pixels
            # where the mask value is below 0.5 (pixels at >= 0.5 are kept).
            img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
            masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
            # TODO:
            masked_latents = ImageToLatentsInvocation.vae_encode(vae_info, self.fp32, self.tiled, masked_image.clone())

            masked_latents_name = context.tensors.save(tensor=masked_latents)
        else:
            masked_latents_name = None

        mask_name = context.tensors.save(tensor=mask)

        # gradient=False: this node produces a hard (binary-style) mask, not a
        # gradient mask (see CreateGradientMaskInvocation for that variant).
        return DenoiseMaskOutput.build(
            mask_name=mask_name,
            masked_latents_name=masked_latents_name,
            gradient=False,
        )