ollamadiffuser 2.0.3__tar.gz → 2.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. {ollamadiffuser-2.0.3/ollamadiffuser.egg-info → ollamadiffuser-2.0.5}/PKG-INFO +3 -3
  2. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/__init__.py +3 -3
  3. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/flux_strategy.py +2 -6
  4. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/generic_strategy.py +2 -0
  5. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/hidream_strategy.py +2 -4
  6. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5/ollamadiffuser.egg-info}/PKG-INFO +3 -3
  7. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/pyproject.toml +2 -2
  8. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/setup.py +2 -2
  9. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/CHANGELOG.md +0 -0
  10. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/LICENSE +0 -0
  11. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/MANIFEST.in +0 -0
  12. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/README.md +0 -0
  13. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/__main__.py +0 -0
  14. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/api/__init__.py +0 -0
  15. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/api/server.py +0 -0
  16. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/__init__.py +0 -0
  17. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/commands.py +0 -0
  18. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/config_commands.py +0 -0
  19. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/lora_commands.py +0 -0
  20. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/main.py +0 -0
  21. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/model_commands.py +0 -0
  22. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/recommend_command.py +0 -0
  23. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/cli/registry_commands.py +0 -0
  24. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/__init__.py +0 -0
  25. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/config/__init__.py +0 -0
  26. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/config/model_registry.py +0 -0
  27. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/config/settings.py +0 -0
  28. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/__init__.py +0 -0
  29. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/base.py +0 -0
  30. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/engine.py +0 -0
  31. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/__init__.py +0 -0
  32. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/controlnet_strategy.py +0 -0
  33. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/gguf_strategy.py +0 -0
  34. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/sd15_strategy.py +0 -0
  35. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/sd3_strategy.py +0 -0
  36. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/sdxl_strategy.py +0 -0
  37. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/inference/strategies/video_strategy.py +0 -0
  38. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/models/__init__.py +0 -0
  39. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/models/gguf_loader.py +0 -0
  40. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/models/manager.py +0 -0
  41. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/utils/__init__.py +0 -0
  42. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/utils/controlnet_preprocessors.py +0 -0
  43. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/utils/download_utils.py +0 -0
  44. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/core/utils/lora_manager.py +0 -0
  45. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/mcp/__init__.py +0 -0
  46. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/mcp/server.py +0 -0
  47. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/__init__.py +0 -0
  48. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/canny/geometric_shapes.png +0 -0
  49. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/canny/house_outline.png +0 -0
  50. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/canny/portrait_outline.png +0 -0
  51. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/depth/linear_perspective.png +0 -0
  52. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/depth/radial_gradient.png +0 -0
  53. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/depth/sphere_3d.png +0 -0
  54. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/metadata.json +0 -0
  55. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/openpose/running_pose.png +0 -0
  56. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/openpose/sitting_pose.png +0 -0
  57. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/openpose/standing_pose.png +0 -0
  58. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/scribble/car_sketch.png +0 -0
  59. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/scribble/face_sketch.png +0 -0
  60. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/samples/scribble/tree_sketch.png +0 -0
  61. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/templates/index.html +0 -0
  62. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/ui/web.py +0 -0
  63. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser/utils/__init__.py +0 -0
  64. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/SOURCES.txt +0 -0
  65. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/dependency_links.txt +0 -0
  66. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/entry_points.txt +0 -0
  67. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/not-zip-safe +0 -0
  68. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/requires.txt +0 -0
  69. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/ollamadiffuser.egg-info/top_level.txt +0 -0
  70. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/requirements.txt +0 -0
  71. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/setup.cfg +0 -0
  72. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_api_base64.py +0 -0
  73. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_api_server.py +0 -0
  74. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_engine.py +0 -0
  75. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_mcp_server.py +0 -0
  76. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_model_registry.py +0 -0
  77. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_mps_support.py +0 -0
  78. {ollamadiffuser-2.0.3 → ollamadiffuser-2.0.5}/tests/test_settings.py +0 -0
@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: ollamadiffuser
-Version: 2.0.3
+Version: 2.0.5
 Summary: Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support
 Home-page: https://github.com/ollamadiffuser/ollamadiffuser
-Author: OllamaDiffuser Team
-Author-email: OllamaDiffuser Team <ollamadiffuser@gmail.com>
+Author: LocalKinAI Team
+Author-email: LocalKinAI Team <contact@localkin.ai>
 License: MIT
 Project-URL: Homepage, https://www.ollamadiffuser.com/
 Project-URL: Website, https://www.ollamadiffuser.com/
@@ -4,9 +4,9 @@ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
 A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
 """
 
-__version__ = "2.0.3"
-__author__ = "OllamaDiffuser Team"
-__email__ = "ollamadiffuser@gmail.com"
+__version__ = "2.0.5"
+__author__ = "LocalKinAI Team"
+__email__ = "contact@localkin.ai"
 __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
 __url__ = "https://www.ollamadiffuser.com/"
 __repository__ = "https://github.com/ollamadiffuser/ollamadiffuser"
@@ -39,10 +39,8 @@ class FluxStrategy(InferenceStrategy):
             load_kwargs["torch_dtype"] = torch.float32
             logger.warning("FLUX on CPU will be very slow for this 12B parameter model")
         elif device == "mps":
-            # MPS has limited bfloat16 support; float16 avoids VAE decode crashes
-            load_kwargs["torch_dtype"] = torch.float16
+            load_kwargs["torch_dtype"] = torch.bfloat16
             load_kwargs["use_safetensors"] = True
-            load_kwargs["low_cpu_mem_usage"] = True
         else:
             load_kwargs["torch_dtype"] = torch.bfloat16
             load_kwargs["use_safetensors"] = True
@@ -107,9 +105,7 @@ class FluxStrategy(InferenceStrategy):
 
         max_seq_len = kwargs.get("max_sequence_length", params.get("max_sequence_length", 512))
 
-        # CPU offload moves tensors between CPU/device; use CPU generator to avoid device mismatches
-        gen_device = "cpu" if self.device == "mps" else self.device
-        generator, used_seed = self._make_generator(seed, gen_device)
+        generator, used_seed = self._make_generator(seed, self.device)
 
         gen_kwargs = {
             "prompt": prompt,
@@ -64,6 +64,8 @@ class GenericPipelineStrategy(InferenceStrategy):
         load_kwargs = {"torch_dtype": dtype, "low_cpu_mem_usage": True}
         if dtype in (torch.float16, torch.bfloat16):
             load_kwargs["use_safetensors"] = True
+        if model_config.variant:
+            load_kwargs["variant"] = model_config.variant
 
         logger.info(f"Loading {pipeline_class_name} from {model_config.path} (dtype={dtype})")
         self.pipeline = pipeline_cls.from_pretrained(
@@ -35,8 +35,7 @@ class HiDreamStrategy(InferenceStrategy):
         if device == "cpu":
             load_kwargs["torch_dtype"] = torch.float32
         elif device == "mps":
-            load_kwargs["torch_dtype"] = torch.float16
-            load_kwargs["low_cpu_mem_usage"] = True
+            load_kwargs["torch_dtype"] = torch.bfloat16
         else:
             load_kwargs["torch_dtype"] = torch.bfloat16
 
@@ -79,8 +78,7 @@ class HiDreamStrategy(InferenceStrategy):
         guidance = guidance_scale if guidance_scale is not None else params.get("guidance_scale", 5.0)
         max_seq_len = kwargs.get("max_sequence_length", params.get("max_sequence_length", 128))
 
-        gen_device = "cpu" if self.device == "mps" else self.device
-        generator, used_seed = self._make_generator(seed, gen_device)
+        generator, used_seed = self._make_generator(seed, self.device)
 
         gen_kwargs = {
             "prompt": prompt,
@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: ollamadiffuser
-Version: 2.0.3
+Version: 2.0.5
 Summary: Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support
 Home-page: https://github.com/ollamadiffuser/ollamadiffuser
-Author: OllamaDiffuser Team
-Author-email: OllamaDiffuser Team <ollamadiffuser@gmail.com>
+Author: LocalKinAI Team
+Author-email: LocalKinAI Team <contact@localkin.ai>
 License: MIT
 Project-URL: Homepage, https://www.ollamadiffuser.com/
 Project-URL: Website, https://www.ollamadiffuser.com/
@@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ollamadiffuser"
-version = "2.0.3"
+version = "2.0.5"
 authors = [
-    {name = "OllamaDiffuser Team", email = "ollamadiffuser@gmail.com"}
+    {name = "LocalKinAI Team", email = "contact@localkin.ai"}
 ]
 description = "Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX, and LoRA support"
 readme = "README.md"
@@ -44,8 +44,8 @@ REQUIRED = [
 setup(
     name="ollamadiffuser",
     version=get_version(),
-    author="OllamaDiffuser Team",
-    author_email="ollamadiffuser@gmail.com",
+    author="LocalKinAI Team",
+    author_email="contact@localkin.ai",
     description="🎨 Ollama-like image generation model management tool with LoRA support",
     long_description=long_description,
     long_description_content_type="text/markdown",
File without changes
File without changes
File without changes