ollamadiffuser 1.2.0__tar.gz → 1.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {ollamadiffuser-1.2.0/ollamadiffuser.egg-info → ollamadiffuser-1.2.2}/PKG-INFO +58 -2
  2. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/README.md +57 -1
  3. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/__init__.py +1 -1
  4. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/api/server.py +10 -2
  5. ollamadiffuser-1.2.2/ollamadiffuser/core/models/gguf_loader.py +771 -0
  6. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2/ollamadiffuser.egg-info}/PKG-INFO +58 -2
  7. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/pyproject.toml +1 -1
  8. ollamadiffuser-1.2.0/ollamadiffuser/core/models/gguf_loader.py +0 -437
  9. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/CHANGELOG.md +0 -0
  10. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/LICENSE +0 -0
  11. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/MANIFEST.in +0 -0
  12. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/__main__.py +0 -0
  13. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/api/__init__.py +0 -0
  14. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/cli/__init__.py +0 -0
  15. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/cli/commands.py +0 -0
  16. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/cli/main.py +0 -0
  17. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/__init__.py +0 -0
  18. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/config/__init__.py +0 -0
  19. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/config/model_registry.py +0 -0
  20. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/config/settings.py +0 -0
  21. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/inference/__init__.py +0 -0
  22. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/inference/engine.py +0 -0
  23. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/models/__init__.py +0 -0
  24. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/models/manager.py +0 -0
  25. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/models/registry.py +0 -0
  26. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/utils/__init__.py +0 -0
  27. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/utils/controlnet_preprocessors.py +0 -0
  28. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/utils/download_utils.py +0 -0
  29. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/core/utils/lora_manager.py +0 -0
  30. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/__init__.py +0 -0
  31. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/.DS_Store +0 -0
  32. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/canny/geometric_shapes.png +0 -0
  33. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/canny/house_outline.png +0 -0
  34. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/canny/portrait_outline.png +0 -0
  35. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/depth/linear_perspective.png +0 -0
  36. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/depth/radial_gradient.png +0 -0
  37. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/depth/sphere_3d.png +0 -0
  38. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/metadata.json +0 -0
  39. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/openpose/running_pose.png +0 -0
  40. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/openpose/sitting_pose.png +0 -0
  41. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/openpose/standing_pose.png +0 -0
  42. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/scribble/car_sketch.png +0 -0
  43. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/scribble/face_sketch.png +0 -0
  44. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/samples/scribble/tree_sketch.png +0 -0
  45. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/templates/index.html +0 -0
  46. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/ui/web.py +0 -0
  47. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser/utils/__init__.py +0 -0
  48. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/SOURCES.txt +0 -0
  49. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/dependency_links.txt +0 -0
  50. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/entry_points.txt +0 -0
  51. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/not-zip-safe +0 -0
  52. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/requires.txt +0 -0
  53. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/ollamadiffuser.egg-info/top_level.txt +0 -0
  54. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/requirements.txt +0 -0
  55. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/setup.cfg +0 -0
  56. {ollamadiffuser-1.2.0 → ollamadiffuser-1.2.2}/setup.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ollamadiffuser
- Version: 1.2.0
+ Version: 1.2.2
  Summary: 🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support
  Home-page: https://github.com/ollamadiffuser/ollamadiffuser
  Author: OllamaDiffuser Team
@@ -82,6 +82,63 @@ Dynamic: requires-python
 
  ---
 
+ ## 🔑 Hugging Face Authentication
+
+ **Do you need a Hugging Face token?** It depends on which models you want to use!
+
+ ### 🟢 Models that DON'T require a token:
+ - **FLUX.1-schnell** - Apache 2.0 license, ready to use ✅
+ - **Stable Diffusion 1.5** - Basic model, no authentication needed ✅
+ - **Most ControlNet models** - Generally public access ✅
+
+ ### 🟡 Models that DO require a token:
+ - **FLUX.1-dev** - Requires HF token and license agreement ⚠️
+ - **Stable Diffusion 3.5** - Requires HF token and license agreement ⚠️
+ - **Some premium LoRAs** - Gated models from Hugging Face ⚠️
+
+ ### 🚀 Quick Setup
+
+ **For basic usage** (no token needed):
+ ```bash
+ # These work immediately without any setup:
+ ollamadiffuser pull flux.1-schnell
+ ollamadiffuser pull stable-diffusion-1.5
+ ```
+
+ **For advanced models** (token required):
+ ```bash
+ # 1. Set your token
+ export HF_TOKEN=your_token_here
+
+ # 2. Now you can access gated models
+ ollamadiffuser pull flux.1-dev
+ ollamadiffuser pull stable-diffusion-3.5-medium
+ ```
+
+ ### 🔧 How to get a Hugging Face token:
+
+ 1. **Create account**: Visit [huggingface.co](https://huggingface.co) and sign up
+ 2. **Generate token**: Go to Settings → Access Tokens → Create new token
+ 3. **Accept licenses**: Visit the model pages and accept license agreements:
+ - [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
+ - [Stable Diffusion 3.5](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium)
+ 4. **Set environment variable**:
+ ```bash
+ # Temporary (current session)
+ export HF_TOKEN=your_token_here
+
+ # Permanent (add to ~/.bashrc or ~/.zshrc)
+ echo 'export HF_TOKEN=your_token_here' >> ~/.bashrc
+ ```
+
+ ### 💡 Pro Tips:
+ - **Start simple**: Begin with FLUX.1-schnell (no token required, commercial use OK)
+ - **Token scope**: Use "read" permissions for downloading models
+ - **Privacy**: Your token stays local - never shared with OllamaDiffuser servers
+ - **Troubleshooting**: If downloads fail, verify your token and model access permissions
+
+ ---
+
  ## ✨ Features
 
  - **🚀 Fast Startup**: Instant application launch with lazy loading architecture
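
The README section added above drives authentication through the `HF_TOKEN` environment variable. As a minimal companion sketch (not part of this diff), the same token can be registered programmatically with `huggingface_hub`, assuming that package is available in the environment, before pulling gated models such as FLUX.1-dev:

```python
# Illustrative sketch only; not part of the ollamadiffuser 1.2.2 code.
# Assumes huggingface_hub is installed alongside the diffusers stack.
import os

from huggingface_hub import login

token = os.environ.get("HF_TOKEN")
if token:
    # Stores the token locally so gated downloads (e.g. FLUX.1-dev) succeed.
    login(token=token)
else:
    print("HF_TOKEN not set; only ungated models like flux.1-schnell will download.")
```

Either route, the environment variable or an explicit `login()`, only grants access once the model's license has been accepted on its Hugging Face page.
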
@@ -423,7 +480,6 @@ with open("control.jpg", "rb") as f:
 
  - **[GGUF Models Guide](GGUF_GUIDE.md)**: Complete guide to memory-efficient GGUF models
  - **[ControlNet Guide](CONTROLNET_GUIDE.md)**: Comprehensive ControlNet usage and examples
- - **[Installation Guide](INSTALLATION_GUIDE.md)**: Detailed installation instructions
  - **[Website Documentation](https://www.ollamadiffuser.com/)**: Complete tutorials and guides
 
  ## 🚀 Performance & Hardware
@@ -13,6 +13,63 @@
 
  ---
 
+ ## 🔑 Hugging Face Authentication
+
+ **Do you need a Hugging Face token?** It depends on which models you want to use!
+
+ ### 🟢 Models that DON'T require a token:
+ - **FLUX.1-schnell** - Apache 2.0 license, ready to use ✅
+ - **Stable Diffusion 1.5** - Basic model, no authentication needed ✅
+ - **Most ControlNet models** - Generally public access ✅
+
+ ### 🟡 Models that DO require a token:
+ - **FLUX.1-dev** - Requires HF token and license agreement ⚠️
+ - **Stable Diffusion 3.5** - Requires HF token and license agreement ⚠️
+ - **Some premium LoRAs** - Gated models from Hugging Face ⚠️
+
+ ### 🚀 Quick Setup
+
+ **For basic usage** (no token needed):
+ ```bash
+ # These work immediately without any setup:
+ ollamadiffuser pull flux.1-schnell
+ ollamadiffuser pull stable-diffusion-1.5
+ ```
+
+ **For advanced models** (token required):
+ ```bash
+ # 1. Set your token
+ export HF_TOKEN=your_token_here
+
+ # 2. Now you can access gated models
+ ollamadiffuser pull flux.1-dev
+ ollamadiffuser pull stable-diffusion-3.5-medium
+ ```
+
+ ### 🔧 How to get a Hugging Face token:
+
+ 1. **Create account**: Visit [huggingface.co](https://huggingface.co) and sign up
+ 2. **Generate token**: Go to Settings → Access Tokens → Create new token
+ 3. **Accept licenses**: Visit the model pages and accept license agreements:
+ - [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)
+ - [Stable Diffusion 3.5](https://huggingface.co/stabilityai/stable-diffusion-3.5-medium)
+ 4. **Set environment variable**:
+ ```bash
+ # Temporary (current session)
+ export HF_TOKEN=your_token_here
+
+ # Permanent (add to ~/.bashrc or ~/.zshrc)
+ echo 'export HF_TOKEN=your_token_here' >> ~/.bashrc
+ ```
+
+ ### 💡 Pro Tips:
+ - **Start simple**: Begin with FLUX.1-schnell (no token required, commercial use OK)
+ - **Token scope**: Use "read" permissions for downloading models
+ - **Privacy**: Your token stays local - never shared with OllamaDiffuser servers
+ - **Troubleshooting**: If downloads fail, verify your token and model access permissions
+
+ ---
+
  ## ✨ Features
 
  - **🚀 Fast Startup**: Instant application launch with lazy loading architecture
@@ -354,7 +411,6 @@ with open("control.jpg", "rb") as f:
 
  - **[GGUF Models Guide](GGUF_GUIDE.md)**: Complete guide to memory-efficient GGUF models
  - **[ControlNet Guide](CONTROLNET_GUIDE.md)**: Comprehensive ControlNet usage and examples
- - **[Installation Guide](INSTALLATION_GUIDE.md)**: Detailed installation instructions
  - **[Website Documentation](https://www.ollamadiffuser.com/)**: Complete tutorials and guides
 
  ## 🚀 Performance & Hardware
@@ -4,7 +4,7 @@ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
  A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
  """
 
- __version__ = "1.2.0"
+ __version__ = "1.2.2"
  __author__ = "OllamaDiffuser Team"
  __email__ = "ollamadiffuser@gmail.com"
  __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
@@ -20,7 +20,9 @@ class GenerateRequest(BaseModel):
  prompt: str
  negative_prompt: str = "low quality, bad anatomy, worst quality, low resolution"
  num_inference_steps: Optional[int] = None
+ steps: Optional[int] = None  # Alias for num_inference_steps for convenience
  guidance_scale: Optional[float] = None
+ cfg_scale: Optional[float] = None  # Alias for guidance_scale for convenience
  width: int = 1024
  height: int = 1024
  control_image_path: Optional[str] = None  # Path to control image file
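
For API clients, the new fields are drop-in aliases for the longer parameter names. A minimal client-side sketch follows; the host, port, and `/api/generate` path are assumptions, since the route definition does not appear in this diff:

```python
# Illustrative request against a locally running OllamaDiffuser server.
# The endpoint URL is an assumption; adjust to your actual server address and route.
import requests

response = requests.post(
    "http://localhost:8000/api/generate",
    json={
        "prompt": "a watercolor fox in a misty forest",
        "steps": 20,        # short alias for num_inference_steps
        "cfg_scale": 3.5,   # short alias for guidance_scale
        "width": 1024,
        "height": 1024,
    },
    timeout=600,
)
response.raise_for_status()
```
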
@@ -232,12 +234,18 @@ def create_app() -> FastAPI:
  # Get current loaded inference engine
  engine = model_manager.loaded_model
 
+ # Handle parameter aliasing - prioritize shorter names for convenience
+ steps = request.steps if request.steps is not None else request.num_inference_steps
+ guidance = request.cfg_scale if request.cfg_scale is not None else request.guidance_scale
+
  # Generate image
  image = engine.generate_image(
  prompt=request.prompt,
  negative_prompt=request.negative_prompt,
- num_inference_steps=request.num_inference_steps,
- guidance_scale=request.guidance_scale,
+ num_inference_steps=steps,
+ steps=steps,  # Pass both for GGUF compatibility
+ guidance_scale=guidance,
+ cfg_scale=guidance,  # Pass both for GGUF compatibility
  width=request.width,
  height=request.height,
  control_image=request.control_image_path,
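
The handler above resolves the aliases before calling the inference engine, preferring the short names when both are present and passing both spellings downstream for GGUF compatibility. A minimal standalone restatement of that precedence rule (illustrative only; `resolve_steps` is not an ollamadiffuser API):

```python
# Mirrors the alias resolution in the generate handler shown above.
from typing import Optional

def resolve_steps(steps: Optional[int], num_inference_steps: Optional[int]) -> Optional[int]:
    """Prefer the short alias whenever the client supplied it explicitly."""
    return steps if steps is not None else num_inference_steps

assert resolve_steps(20, None) == 20   # only the alias sent
assert resolve_steps(None, 30) == 30   # only the long name sent
assert resolve_steps(20, 30) == 20     # both sent: the alias wins
```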