ollamadiffuser-2.0.2-py3-none-any.whl → ollamadiffuser-2.0.4-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- ollamadiffuser/__init__.py +3 -3
- ollamadiffuser/core/inference/strategies/flux_strategy.py +4 -8
- ollamadiffuser/core/inference/strategies/generic_strategy.py +3 -14
- ollamadiffuser/core/inference/strategies/hidream_strategy.py +3 -6
- ollamadiffuser/core/inference/strategies/video_strategy.py +1 -2
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/METADATA +3 -3
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/RECORD +11 -11
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/WHEEL +0 -0
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/entry_points.txt +0 -0
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/licenses/LICENSE +0 -0
- {ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/top_level.txt +0 -0
ollamadiffuser/__init__.py
CHANGED

@@ -4,9 +4,9 @@ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
 A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
 """
 
-__version__ = "2.0.2"
-__author__ = "
-__email__ = "
+__version__ = "2.0.4"
+__author__ = "LocalKinAI Team"
+__email__ = "contact@localkin.ai"
 __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
 __url__ = "https://www.ollamadiffuser.com/"
 __repository__ = "https://github.com/ollamadiffuser/ollamadiffuser"
ollamadiffuser/core/inference/strategies/flux_strategy.py
CHANGED

@@ -39,10 +39,8 @@ class FluxStrategy(InferenceStrategy):
             load_kwargs["torch_dtype"] = torch.float32
             logger.warning("FLUX on CPU will be very slow for this 12B parameter model")
         elif device == "mps":
-
-            load_kwargs["torch_dtype"] = torch.float16
+            load_kwargs["torch_dtype"] = torch.bfloat16
             load_kwargs["use_safetensors"] = True
-            load_kwargs["low_cpu_mem_usage"] = True
         else:
             load_kwargs["torch_dtype"] = torch.bfloat16
             load_kwargs["use_safetensors"] = True

@@ -51,11 +49,11 @@ class FluxStrategy(InferenceStrategy):
             model_config.path, **load_kwargs
         )
 
-        if device
-            # CPU offloading manages device placement itself — don't call _move_to_device
+        if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
             self.pipeline.enable_model_cpu_offload(device=device)
             logger.info(f"Enabled CPU offloading for FLUX on {device}")
         else:
+            # MPS: unified memory means CPU offload adds overhead without saving memory
             self._move_to_device(device)
         self._apply_memory_optimizations()
 

@@ -107,9 +105,7 @@ class FluxStrategy(InferenceStrategy):
 
         max_seq_len = kwargs.get("max_sequence_length", params.get("max_sequence_length", 512))
 
-
-        gen_device = "cpu" if self.device == "mps" else self.device
-        generator, used_seed = self._make_generator(seed, gen_device)
+        generator, used_seed = self._make_generator(seed, self.device)
 
         gen_kwargs = {
             "prompt": prompt,
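Taken together, the FLUX changes replace the float16 + low_cpu_mem_usage special case on MPS with plain bfloat16, restrict CPU offloading to CUDA, and create the RNG generator on the active device instead of detouring through the CPU on MPS. A condensed sketch of the resulting load path, assuming the diffusers FluxPipeline API; load_flux and model_path are illustrative names, not the project's actual method:

import torch
from diffusers import FluxPipeline

def load_flux(model_path: str, device: str) -> FluxPipeline:
    # Sketch of the 2.0.4 behaviour described by the hunks above
    load_kwargs = {}
    if device == "cpu":
        load_kwargs["torch_dtype"] = torch.float32  # full precision; very slow for a 12B model
    else:
        # 2.0.4 uses bfloat16 on both MPS and CUDA (MPS previously got float16)
        load_kwargs["torch_dtype"] = torch.bfloat16
        load_kwargs["use_safetensors"] = True

    pipe = FluxPipeline.from_pretrained(model_path, **load_kwargs)

    if device == "cuda" and hasattr(pipe, "enable_model_cpu_offload"):
        # Offloading manages device placement itself, so no extra .to(device)
        pipe.enable_model_cpu_offload(device=device)
    else:
        # CPU, or MPS where unified memory makes offloading counterproductive
        pipe = pipe.to(device)
    return pipe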
ollamadiffuser/core/inference/strategies/generic_strategy.py
CHANGED

@@ -72,20 +72,8 @@ class GenericPipelineStrategy(InferenceStrategy):
 
         # Device placement
         enable_offload = params.get("enable_cpu_offload", False)
-
-
-            enable_offload = True
-
-        if enable_offload and device in ("cuda", "mps"):
-            if device == "mps" and hasattr(self.pipeline, "enable_model_cpu_offload"):
-                # MPS/unified memory: model-level offload is more effective than
-                # sequential offload because it fully deallocates entire components
-                # (T5 encoder, transformer, VAE) between stages, reducing peak
-                # memory pressure on the MPS allocator.
-                self.pipeline.enable_model_cpu_offload(device=device)
-                logger.info(f"Enabled model CPU offloading on {device}")
-            elif hasattr(self.pipeline, "enable_sequential_cpu_offload"):
-                # CUDA: sequential offload moves individual layers, lowest VRAM usage
+        if enable_offload and device == "cuda":
+            if hasattr(self.pipeline, "enable_sequential_cpu_offload"):
                 self.pipeline.enable_sequential_cpu_offload(device=device)
                 logger.info(f"Enabled sequential CPU offloading on {device}")
             elif hasattr(self.pipeline, "enable_model_cpu_offload"):

@@ -94,6 +82,7 @@ class GenericPipelineStrategy(InferenceStrategy):
             else:
                 self._move_to_device(device)
         else:
+            # MPS: unified memory means CPU offload adds overhead without saving memory
             self._move_to_device(device)
 
         self._apply_memory_optimizations()
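The generic strategy loses its MPS offload branch (and the long comment defending it) entirely; offloading is now attempted only on CUDA, preferring sequential offload for the lowest VRAM footprint and falling back to model-level offload. A minimal sketch of the new ladder, assuming a diffusers pipeline object; place_pipeline is an illustrative helper name:

import logging

logger = logging.getLogger(__name__)

def place_pipeline(pipe, device: str, enable_offload: bool) -> None:
    # Mirrors the 2.0.4 ladder in generic_strategy.py shown above
    if enable_offload and device == "cuda":
        if hasattr(pipe, "enable_sequential_cpu_offload"):
            # Lowest VRAM: weights stream to the GPU layer by layer, at a speed cost
            pipe.enable_sequential_cpu_offload(device=device)
            logger.info("Enabled sequential CPU offloading on %s", device)
        elif hasattr(pipe, "enable_model_cpu_offload"):
            # Coarser fallback: whole components swap between CPU and GPU
            pipe.enable_model_cpu_offload(device=device)
            logger.info("Enabled model CPU offloading on %s", device)
        else:
            pipe.to(device)
    else:
        # CPU, or MPS where unified memory makes offloading counterproductive
        pipe.to(device)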
ollamadiffuser/core/inference/strategies/hidream_strategy.py
CHANGED

@@ -35,8 +35,7 @@ class HiDreamStrategy(InferenceStrategy):
         if device == "cpu":
             load_kwargs["torch_dtype"] = torch.float32
         elif device == "mps":
-            load_kwargs["torch_dtype"] = torch.
-            load_kwargs["low_cpu_mem_usage"] = True
+            load_kwargs["torch_dtype"] = torch.bfloat16
         else:
             load_kwargs["torch_dtype"] = torch.bfloat16
 

@@ -44,8 +43,7 @@ class HiDreamStrategy(InferenceStrategy):
             model_config.path, **load_kwargs
         )
 
-        if device
-            # CPU offloading manages device placement itself — don't call _move_to_device
+        if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
             self.pipeline.enable_model_cpu_offload(device=device)
         else:
             self._move_to_device(device)

@@ -80,8 +78,7 @@ class HiDreamStrategy(InferenceStrategy):
         guidance = guidance_scale if guidance_scale is not None else params.get("guidance_scale", 5.0)
         max_seq_len = kwargs.get("max_sequence_length", params.get("max_sequence_length", 128))
 
-        gen_device = "cpu" if self.device == "mps" else self.device
-        generator, used_seed = self._make_generator(seed, gen_device)
+        generator, used_seed = self._make_generator(seed, self.device)
 
         gen_kwargs = {
             "prompt": prompt,
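Both FLUX and HiDream previously routed seed generation through a CPU generator when running on MPS; 2.0.4 passes self.device straight to _make_generator. This works because recent PyTorch builds accept device="mps" for torch.Generator. A small stand-in sketch under that assumption; the name and signature of the real helper on InferenceStrategy may differ:

from typing import Optional, Tuple

import torch

def make_generator(seed: Optional[int], device: str) -> Tuple[torch.Generator, int]:
    # Illustrative stand-in for the strategies' _make_generator helper
    if seed is None:
        # Draw a seed anyway so the run can be reported and reproduced later
        seed = int(torch.randint(0, 2**32 - 1, (1,)).item())
    # torch.Generator accepts "mps" on recent PyTorch, so no CPU detour is needed
    generator = torch.Generator(device=device).manual_seed(seed)
    return generator, seed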
|
@@ -46,8 +46,7 @@ class VideoStrategy(InferenceStrategy):
|
|
|
46
46
|
steps_offset=1,
|
|
47
47
|
)
|
|
48
48
|
|
|
49
|
-
if device
|
|
50
|
-
# CPU offloading manages device placement itself — don't call _move_to_device
|
|
49
|
+
if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
|
|
51
50
|
self.pipeline.enable_model_cpu_offload(device=device)
|
|
52
51
|
else:
|
|
53
52
|
self._move_to_device(device)
|
|
{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/METADATA
CHANGED

@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: ollamadiffuser
-Version: 2.0.2
+Version: 2.0.4
 Summary: Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support
 Home-page: https://github.com/ollamadiffuser/ollamadiffuser
-Author:
-Author-email:
+Author: LocalKinAI Team
+Author-email: LocalKinAI Team <contact@localkin.ai>
 License: MIT
 Project-URL: Homepage, https://www.ollamadiffuser.com/
 Project-URL: Website, https://www.ollamadiffuser.com/
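These METADATA fields are what importlib.metadata surfaces at runtime, which is a quick way to confirm the installed version and the new author fields:

from importlib.metadata import metadata, version

print(version("ollamadiffuser"))  # "2.0.4" once the new wheel is installed

md = metadata("ollamadiffuser")
print(md["Author"])        # LocalKinAI Team
print(md["Author-email"])  # LocalKinAI Team <contact@localkin.ai>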
{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-ollamadiffuser/__init__.py,sha256=
+ollamadiffuser/__init__.py,sha256=8Y6N4xTVGOic3QVkvCBEh9Si48jzROTmEH2ODArjFxo,1118
 ollamadiffuser/__main__.py,sha256=tNWMvEHq4ddtKLp7DrhIoOdnFw3F8RNrETC_u5xpkFI,141
 ollamadiffuser/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ollamadiffuser/api/server.py,sha256=MqEKjckz8x8pU9dhZlLXET5XWt7ERfVcAfvdVfDLFWw,15464

@@ -19,14 +19,14 @@ ollamadiffuser/core/inference/base.py,sha256=AyoM6j37nhhh5RXQeH9Ycn9x1_eRPQQfC5n
 ollamadiffuser/core/inference/engine.py,sha256=xUCSQmGke9yAIoKGzh9jRaH1XE3GP9-uGM2hseuUzao,8139
 ollamadiffuser/core/inference/strategies/__init__.py,sha256=5LQgTeS5JVin-HiGX7UvjlPzd0awombKrNhvAVr3SSw,53
 ollamadiffuser/core/inference/strategies/controlnet_strategy.py,sha256=_lGlCaYhrrdn7N6Aw0X9a4L90wKPjCrr6EBfQqPVH2E,6712
-ollamadiffuser/core/inference/strategies/flux_strategy.py,sha256=
-ollamadiffuser/core/inference/strategies/generic_strategy.py,sha256=
+ollamadiffuser/core/inference/strategies/flux_strategy.py,sha256=V11exUxKw256YGACGcLQD0Kzla6ZHF0HnxrKT5Ri1Cw,5351
+ollamadiffuser/core/inference/strategies/generic_strategy.py,sha256=IyCuIDi-MyscUD4LVvbrF6ZcpJRsLbDfQ6juHFU7JHU,6129
 ollamadiffuser/core/inference/strategies/gguf_strategy.py,sha256=kIGT85tDCcSsliXdaxEJoQz4Gm7Xt7TfEcu6xcmTvJg,3893
-ollamadiffuser/core/inference/strategies/hidream_strategy.py,sha256=
+ollamadiffuser/core/inference/strategies/hidream_strategy.py,sha256=27XSeXQDvlDl7yMlVhkHzf7tdDr7udQyPyYowkDdZmM,3696
 ollamadiffuser/core/inference/strategies/sd15_strategy.py,sha256=qz5eGA2xkcA_3oNywP-rCliXzP7jYpH60728QmOT5fw,4966
 ollamadiffuser/core/inference/strategies/sd3_strategy.py,sha256=6DjWebeyjaH7jiRm8hf2ismkJ3Gth69u71enVgMMPi8,2772
 ollamadiffuser/core/inference/strategies/sdxl_strategy.py,sha256=tslfENJIvEhDuj1D6aClFF6hv8i0JO2PukFQZsTCwQY,5137
-ollamadiffuser/core/inference/strategies/video_strategy.py,sha256=
+ollamadiffuser/core/inference/strategies/video_strategy.py,sha256=WnzTkWY_b70kZQa0j4w6iSEnv0gwkqKG8IaAYrP3bRI,3834
 ollamadiffuser/core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ollamadiffuser/core/models/gguf_loader.py,sha256=ocfl3_MDVXC9nSjW8YJdz4kX1Q-Qe2ltu6w4fbqhxVY,35724
 ollamadiffuser/core/models/manager.py,sha256=rTEAameGih3wPcVG_Y-4k_brBeEqEoBjoI7fjggNtiY,16799

@@ -53,9 +53,9 @@ ollamadiffuser/ui/samples/scribble/face_sketch.png,sha256=MVVYy_aS48xoS_RnIDzLUa
 ollamadiffuser/ui/samples/scribble/tree_sketch.png,sha256=3P-NGgW25xRwreDxiBYKcDhd2oHZAwKSkjNVM5oPTWY,3017
 ollamadiffuser/ui/templates/index.html,sha256=XcrYZqtDR65dAiu959Ea19t3MbtYmXl9PVyMnR1Telk,42358
 ollamadiffuser/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ollamadiffuser-2.0.2.dist-info/licenses/LICENSE,sha256=
-ollamadiffuser-2.0.2.dist-info/METADATA,sha256=
-ollamadiffuser-2.0.2.dist-info/WHEEL,sha256=
-ollamadiffuser-2.0.2.dist-info/entry_points.txt,sha256=
-ollamadiffuser-2.0.2.dist-info/top_level.txt,sha256=
-ollamadiffuser-2.0.2.dist-info/RECORD,,
+ollamadiffuser-2.0.4.dist-info/licenses/LICENSE,sha256=cnGL9l2P510Uk3TCnv62kot6vAfdSawhOZh7Y-oYoIE,1071
+ollamadiffuser-2.0.4.dist-info/METADATA,sha256=XmErikjPgUWhElQlE_9_JQ_OtG7IFdd9Jx6jXtw3jx4,31084
+ollamadiffuser-2.0.4.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ollamadiffuser-2.0.4.dist-info/entry_points.txt,sha256=Bp-ZzV3F7QpQu02Mcafeza-oTMjDslomz9qrhvfcQUA,116
+ollamadiffuser-2.0.4.dist-info/top_level.txt,sha256=97wOGgTCxDE765Nr_o7B4Kwr_M_jy8fCCeQ81sMKlC4,15
+ollamadiffuser-2.0.4.dist-info/RECORD,,
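Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file with the trailing "=" padding stripped, per the wheel specification. A short verification sketch against an unpacked wheel; it ignores the CSV quoting a path containing commas would require:

import base64
import hashlib
from pathlib import Path

def verify_record_row(root: Path, row: str) -> bool:
    # row looks like: "ollamadiffuser/__init__.py,sha256=8Y6N4xTV...ArjFxo,1118"
    path, hash_field, size = row.rsplit(",", 2)
    data = (root / path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return hash_field == f"sha256={digest}" and len(data) == int(size)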
{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/WHEEL
File without changes

{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/entry_points.txt
File without changes

{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/licenses/LICENSE
File without changes

{ollamadiffuser-2.0.2.dist-info → ollamadiffuser-2.0.4.dist-info}/top_level.txt
File without changes