ollamadiffuser 2.0.2__tar.gz → 2.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. {ollamadiffuser-2.0.2/ollamadiffuser.egg-info → ollamadiffuser-2.0.3}/PKG-INFO +1 -1
  2. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/__init__.py +1 -1
  3. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/flux_strategy.py +2 -2
  4. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/generic_strategy.py +3 -14
  5. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/hidream_strategy.py +1 -2
  6. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/video_strategy.py +1 -2
  7. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3/ollamadiffuser.egg-info}/PKG-INFO +1 -1
  8. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/pyproject.toml +1 -1
  9. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_engine.py +3 -3
  10. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/CHANGELOG.md +0 -0
  11. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/LICENSE +0 -0
  12. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/MANIFEST.in +0 -0
  13. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/README.md +0 -0
  14. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/__main__.py +0 -0
  15. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/api/__init__.py +0 -0
  16. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/api/server.py +0 -0
  17. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/__init__.py +0 -0
  18. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/commands.py +0 -0
  19. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/config_commands.py +0 -0
  20. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/lora_commands.py +0 -0
  21. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/main.py +0 -0
  22. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/model_commands.py +0 -0
  23. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/recommend_command.py +0 -0
  24. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/cli/registry_commands.py +0 -0
  25. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/__init__.py +0 -0
  26. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/config/__init__.py +0 -0
  27. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/config/model_registry.py +0 -0
  28. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/config/settings.py +0 -0
  29. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/__init__.py +0 -0
  30. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/base.py +0 -0
  31. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/engine.py +0 -0
  32. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/__init__.py +0 -0
  33. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/controlnet_strategy.py +0 -0
  34. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/gguf_strategy.py +0 -0
  35. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/sd15_strategy.py +0 -0
  36. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/sd3_strategy.py +0 -0
  37. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/inference/strategies/sdxl_strategy.py +0 -0
  38. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/models/__init__.py +0 -0
  39. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/models/gguf_loader.py +0 -0
  40. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/models/manager.py +0 -0
  41. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/utils/__init__.py +0 -0
  42. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/utils/controlnet_preprocessors.py +0 -0
  43. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/utils/download_utils.py +0 -0
  44. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/core/utils/lora_manager.py +0 -0
  45. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/mcp/__init__.py +0 -0
  46. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/mcp/server.py +0 -0
  47. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/__init__.py +0 -0
  48. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/canny/geometric_shapes.png +0 -0
  49. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/canny/house_outline.png +0 -0
  50. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/canny/portrait_outline.png +0 -0
  51. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/depth/linear_perspective.png +0 -0
  52. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/depth/radial_gradient.png +0 -0
  53. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/depth/sphere_3d.png +0 -0
  54. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/metadata.json +0 -0
  55. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/openpose/running_pose.png +0 -0
  56. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/openpose/sitting_pose.png +0 -0
  57. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/openpose/standing_pose.png +0 -0
  58. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/scribble/car_sketch.png +0 -0
  59. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/scribble/face_sketch.png +0 -0
  60. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/samples/scribble/tree_sketch.png +0 -0
  61. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/templates/index.html +0 -0
  62. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/ui/web.py +0 -0
  63. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser/utils/__init__.py +0 -0
  64. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/SOURCES.txt +0 -0
  65. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/dependency_links.txt +0 -0
  66. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/entry_points.txt +0 -0
  67. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/not-zip-safe +0 -0
  68. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/requires.txt +0 -0
  69. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/ollamadiffuser.egg-info/top_level.txt +0 -0
  70. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/requirements.txt +0 -0
  71. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/setup.cfg +0 -0
  72. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/setup.py +0 -0
  73. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_api_base64.py +0 -0
  74. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_api_server.py +0 -0
  75. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_mcp_server.py +0 -0
  76. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_model_registry.py +0 -0
  77. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_mps_support.py +0 -0
  78. {ollamadiffuser-2.0.2 → ollamadiffuser-2.0.3}/tests/test_settings.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ollamadiffuser
3
- Version: 2.0.2
3
+ Version: 2.0.3
4
4
  Summary: Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support
5
5
  Home-page: https://github.com/ollamadiffuser/ollamadiffuser
6
6
  Author: OllamaDiffuser Team
@@ -4,7 +4,7 @@ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
4
4
  A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
5
5
  """
6
6
 
7
- __version__ = "2.0.2"
7
+ __version__ = "2.0.3"
8
8
  __author__ = "OllamaDiffuser Team"
9
9
  __email__ = "ollamadiffuser@gmail.com"
10
10
  __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
@@ -51,11 +51,11 @@ class FluxStrategy(InferenceStrategy):
51
51
  model_config.path, **load_kwargs
52
52
  )
53
53
 
54
- if device in ("cuda", "mps") and hasattr(self.pipeline, "enable_model_cpu_offload"):
55
- # CPU offloading manages device placement itself — don't call _move_to_device
54
+ if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
56
55
  self.pipeline.enable_model_cpu_offload(device=device)
57
56
  logger.info(f"Enabled CPU offloading for FLUX on {device}")
58
57
  else:
58
+ # MPS: unified memory means CPU offload adds overhead without saving memory
59
59
  self._move_to_device(device)
60
60
  self._apply_memory_optimizations()
61
61
 
@@ -72,20 +72,8 @@ class GenericPipelineStrategy(InferenceStrategy):
72
72
 
73
73
  # Device placement
74
74
  enable_offload = params.get("enable_cpu_offload", False)
75
- # Auto-enable CPU offload on MPS to avoid OOM on unified memory
76
- if device == "mps":
77
- enable_offload = True
78
-
79
- if enable_offload and device in ("cuda", "mps"):
80
- if device == "mps" and hasattr(self.pipeline, "enable_model_cpu_offload"):
81
- # MPS/unified memory: model-level offload is more effective than
82
- # sequential offload because it fully deallocates entire components
83
- # (T5 encoder, transformer, VAE) between stages, reducing peak
84
- # memory pressure on the MPS allocator.
85
- self.pipeline.enable_model_cpu_offload(device=device)
86
- logger.info(f"Enabled model CPU offloading on {device}")
87
- elif hasattr(self.pipeline, "enable_sequential_cpu_offload"):
88
- # CUDA: sequential offload moves individual layers, lowest VRAM usage
75
+ if enable_offload and device == "cuda":
76
+ if hasattr(self.pipeline, "enable_sequential_cpu_offload"):
89
77
  self.pipeline.enable_sequential_cpu_offload(device=device)
90
78
  logger.info(f"Enabled sequential CPU offloading on {device}")
91
79
  elif hasattr(self.pipeline, "enable_model_cpu_offload"):
@@ -94,6 +82,7 @@ class GenericPipelineStrategy(InferenceStrategy):
94
82
  else:
95
83
  self._move_to_device(device)
96
84
  else:
85
+ # MPS: unified memory means CPU offload adds overhead without saving memory
97
86
  self._move_to_device(device)
98
87
 
99
88
  self._apply_memory_optimizations()
@@ -44,8 +44,7 @@ class HiDreamStrategy(InferenceStrategy):
44
44
  model_config.path, **load_kwargs
45
45
  )
46
46
 
47
- if device in ("cuda", "mps") and hasattr(self.pipeline, "enable_model_cpu_offload"):
48
- # CPU offloading manages device placement itself — don't call _move_to_device
47
+ if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
49
48
  self.pipeline.enable_model_cpu_offload(device=device)
50
49
  else:
51
50
  self._move_to_device(device)
@@ -46,8 +46,7 @@ class VideoStrategy(InferenceStrategy):
46
46
  steps_offset=1,
47
47
  )
48
48
 
49
- if device in ("cuda", "mps") and hasattr(self.pipeline, "enable_model_cpu_offload"):
50
- # CPU offloading manages device placement itself — don't call _move_to_device
49
+ if device == "cuda" and hasattr(self.pipeline, "enable_model_cpu_offload"):
51
50
  self.pipeline.enable_model_cpu_offload(device=device)
52
51
  else:
53
52
  self._move_to_device(device)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ollamadiffuser
3
- Version: 2.0.2
3
+ Version: 2.0.3
4
4
  Summary: Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support
5
5
  Home-page: https://github.com/ollamadiffuser/ollamadiffuser
6
6
  Author: OllamaDiffuser Team
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "ollamadiffuser"
7
- version = "2.0.2"
7
+ version = "2.0.3"
8
8
  authors = [
9
9
  {name = "OllamaDiffuser Team", email = "ollamadiffuser@gmail.com"}
10
10
  ]
@@ -242,9 +242,9 @@ class TestGenericStrategy:
242
242
  def test_generic_strategy_cpu_offload_mps(self):
243
243
  result, mock_pipe = self._load_generic_with_offload("mps")
244
244
  assert result is True
245
- # MPS prefers model-level offload (more effective on unified memory)
246
- mock_pipe.enable_model_cpu_offload.assert_called_once()
247
- mock_pipe.to.assert_not_called()
245
+ # MPS: unified memory means CPU offload adds overhead, so load directly to device
246
+ mock_pipe.enable_model_cpu_offload.assert_not_called()
247
+ mock_pipe.to.assert_called_once()
248
248
 
249
249
 
250
250
  class TestInferenceStrategyBase:
File without changes
File without changes
File without changes
File without changes