ollamadiffuser 1.1.0__tar.gz → 1.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. {ollamadiffuser-1.1.0/ollamadiffuser.egg-info → ollamadiffuser-1.1.2}/PKG-INFO +125 -74
  2. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/README.md +124 -73
  3. ollamadiffuser-1.1.2/ollamadiffuser/__init__.py +31 -0
  4. ollamadiffuser-1.1.2/ollamadiffuser/__main__.py +9 -0
  5. ollamadiffuser-1.1.2/ollamadiffuser/cli/commands.py +133 -0
  6. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/cli/main.py +124 -18
  7. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/utils/controlnet_preprocessors.py +79 -7
  8. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/utils/download_utils.py +169 -40
  9. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2/ollamadiffuser.egg-info}/PKG-INFO +125 -74
  10. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/SOURCES.txt +1 -0
  11. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/pyproject.toml +1 -1
  12. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/setup.py +44 -23
  13. ollamadiffuser-1.1.0/ollamadiffuser/__main__.py +0 -50
  14. ollamadiffuser-1.1.0/ollamadiffuser/utils/__init__.py +0 -0
  15. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/CHANGELOG.md +0 -0
  16. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/LICENSE +0 -0
  17. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/MANIFEST.in +0 -0
  18. {ollamadiffuser-1.1.0/ollamadiffuser → ollamadiffuser-1.1.2/ollamadiffuser/api}/__init__.py +0 -0
  19. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/api/server.py +0 -0
  20. {ollamadiffuser-1.1.0/ollamadiffuser/api → ollamadiffuser-1.1.2/ollamadiffuser/cli}/__init__.py +0 -0
  21. {ollamadiffuser-1.1.0/ollamadiffuser/cli → ollamadiffuser-1.1.2/ollamadiffuser/core}/__init__.py +0 -0
  22. {ollamadiffuser-1.1.0/ollamadiffuser/core → ollamadiffuser-1.1.2/ollamadiffuser/core/config}/__init__.py +0 -0
  23. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/config/settings.py +0 -0
  24. {ollamadiffuser-1.1.0/ollamadiffuser/core/config → ollamadiffuser-1.1.2/ollamadiffuser/core/inference}/__init__.py +0 -0
  25. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/inference/engine.py +0 -0
  26. {ollamadiffuser-1.1.0/ollamadiffuser/core/inference → ollamadiffuser-1.1.2/ollamadiffuser/core/models}/__init__.py +0 -0
  27. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/models/manager.py +0 -0
  28. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/utils/__init__.py +0 -0
  29. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/core/utils/lora_manager.py +0 -0
  30. {ollamadiffuser-1.1.0/ollamadiffuser/core/models → ollamadiffuser-1.1.2/ollamadiffuser/ui}/__init__.py +0 -0
  31. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/ui/templates/index.html +0 -0
  32. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser/ui/web.py +0 -0
  33. {ollamadiffuser-1.1.0/ollamadiffuser/ui → ollamadiffuser-1.1.2/ollamadiffuser/utils}/__init__.py +0 -0
  34. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/dependency_links.txt +0 -0
  35. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/entry_points.txt +0 -0
  36. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/not-zip-safe +0 -0
  37. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/requires.txt +0 -0
  38. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/ollamadiffuser.egg-info/top_level.txt +0 -0
  39. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/requirements.txt +0 -0
  40. {ollamadiffuser-1.1.0 → ollamadiffuser-1.1.2}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ollamadiffuser
3
- Version: 1.1.0
3
+ Version: 1.1.2
4
4
  Summary: 🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support
5
5
  Home-page: https://github.com/ollamadiffuser/ollamadiffuser
6
6
  Author: OllamaDiffuser Team
@@ -70,6 +70,7 @@ Dynamic: requires-python
70
70
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
71
71
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
72
72
 
73
+
73
74
  ## Local AI Image Generation with OllamaDiffuser
74
75
 
75
76
  **OllamaDiffuser** simplifies local deployment of **Stable Diffusion**, **FLUX.1**, and other AI image generation models. An intuitive **local SD** tool inspired by **Ollama's** simplicity - perfect for **local diffuser** workflows with CLI, web UI, and LoRA support.
@@ -109,7 +110,7 @@ curl -X POST http://localhost:8000/api/generate \
109
110
  ### Option 2: Development Installation
110
111
  ```bash
111
112
  # Clone the repository
112
- git clone https://github.com/yourusername/ollamadiffuser.git
113
+ git clone https://github.com/ollamadiffuser/ollamadiffuser.git
113
114
  cd ollamadiffuser
114
115
 
115
116
  # Install dependencies
@@ -118,17 +119,25 @@ pip install -e .
118
119
 
119
120
  ### Basic Usage
120
121
  ```bash
122
+ # Check version
123
+ ollamadiffuser -V
124
+
121
125
  # Install a model
122
126
  ollamadiffuser pull stable-diffusion-1.5
123
127
 
124
- # Load the model
125
- ollamadiffuser load stable-diffusion-1.5
128
+ # Run the model (loads and starts API server)
129
+ ollamadiffuser run stable-diffusion-1.5
126
130
 
127
- # Generate an image
128
- ollamadiffuser generate "a beautiful sunset over mountains"
131
+ # Generate an image via API
132
+ curl -X POST http://localhost:8000/api/generate \
133
+ -H "Content-Type: application/json" \
134
+ -d '{"prompt": "a beautiful sunset over mountains"}' \
135
+ --output image.png
129
136
 
130
137
  # Start web interface
131
138
  ollamadiffuser --mode ui
139
+
140
+ open http://localhost:8001
132
141
  ```
133
142
 
134
143
  ### ControlNet Quick Start
@@ -136,8 +145,8 @@ ollamadiffuser --mode ui
136
145
  # Install ControlNet model
137
146
  ollamadiffuser pull controlnet-canny-sd15
138
147
 
139
- # Load ControlNet model
140
- ollamadiffuser load controlnet-canny-sd15
148
+ # Run ControlNet model (loads and starts API server)
149
+ ollamadiffuser run controlnet-canny-sd15
141
150
 
142
151
  # Generate with control image
143
152
  curl -X POST http://localhost:8000/api/generate/controlnet \
@@ -219,21 +228,29 @@ ollamadiffuser lora unload
219
228
 
220
229
  ### Command Line Interface
221
230
  ```bash
222
- # Generate with advanced parameters
223
- ollamadiffuser generate \
224
- "a futuristic cityscape" \
225
- --negative-prompt "blurry, low quality" \
226
- --steps 30 \
227
- --guidance 7.5 \
228
- --width 1024 \
229
- --height 1024
231
+ # Pull and run a model
232
+ ollamadiffuser pull stable-diffusion-1.5
233
+ ollamadiffuser run stable-diffusion-1.5
234
+
235
+ # In another terminal, generate images via API
236
+ curl -X POST http://localhost:8000/api/generate \
237
+ -H "Content-Type: application/json" \
238
+ -d '{
239
+ "prompt": "a futuristic cityscape",
240
+ "negative_prompt": "blurry, low quality",
241
+ "num_inference_steps": 30,
242
+ "guidance_scale": 7.5,
243
+ "width": 1024,
244
+ "height": 1024
245
+ }' \
246
+ --output image.png
230
247
  ```
231
248
 
232
249
  ### Web UI
233
250
  ```bash
234
251
  # Start web interface
235
252
  ollamadiffuser --mode ui
236
- # Open http://localhost:8001
253
+ open http://localhost:8001
237
254
  ```
238
255
 
239
256
  Features:
@@ -247,10 +264,15 @@ Features:
247
264
  # Start API server
248
265
  ollamadiffuser --mode api
249
266
 
267
+ ollamadiffuser load stable-diffusion-1.5
268
+
250
269
  # Generate image
251
270
  curl -X POST http://localhost:8000/api/generate \
252
271
  -H "Content-Type: application/json" \
253
272
  -d '{"prompt": "a beautiful landscape", "width": 1024, "height": 1024}'
273
+
274
+ # API document
275
+ http://localhost:8000/docs
254
276
  ```
255
277
 
256
278
  ### Python API
@@ -258,16 +280,19 @@ curl -X POST http://localhost:8000/api/generate \
258
280
  from ollamadiffuser.core.models.manager import model_manager
259
281
 
260
282
  # Load model
261
- model_manager.load_model("stable-diffusion-1.5")
262
- engine = model_manager.loaded_model
263
-
264
- # Generate image
265
- image = engine.generate_image(
266
- prompt="a beautiful sunset",
267
- width=1024,
268
- height=1024
269
- )
270
- image.save("output.jpg")
283
+ success = model_manager.load_model("stable-diffusion-1.5")
284
+ if success:
285
+ engine = model_manager.loaded_model
286
+
287
+ # Generate image
288
+ image = engine.generate_image(
289
+ prompt="a beautiful sunset",
290
+ width=1024,
291
+ height=1024
292
+ )
293
+ image.save("output.jpg")
294
+ else:
295
+ print("Failed to load model")
271
296
  ```
272
297
 
273
298
  ## 📦 Supported Models
@@ -297,18 +322,6 @@ Models are automatically configured with optimal settings:
297
322
  - **Precision Handling**: FP16/BF16 support for efficiency
298
323
  - **Safety Features**: NSFW filter bypass for creative freedom
299
324
 
300
- ### Performance Tuning
301
- ```bash
302
- # Enable verbose logging
303
- ollamadiffuser -v generate "test prompt"
304
-
305
- # Check system status
306
- ollamadiffuser status
307
-
308
- # Monitor memory usage
309
- ollamadiffuser info
310
- ```
311
-
312
325
  ## 🔧 Advanced Usage
313
326
 
314
327
  ### ControlNet Parameters
@@ -331,8 +344,9 @@ from ollamadiffuser.core.utils.controlnet_preprocessors import controlnet_prepro
331
344
  controlnet_preprocessor.initialize()
332
345
 
333
346
  # Process multiple images
334
- for image_path in image_list:
335
- control_img = controlnet_preprocessor.preprocess(image, "canny")
347
+ prompt = "beautiful landscape" # Define the prompt
348
+ for i, image_path in enumerate(image_list):
349
+ control_img = controlnet_preprocessor.preprocess(image_path, "canny")
336
350
  result = engine.generate_image(prompt, control_image=control_img)
337
351
  result.save(f"output_{i}.jpg")
338
352
  ```
@@ -360,8 +374,6 @@ with open("control.jpg", "rb") as f:
360
374
  ## 📚 Documentation & Guides
361
375
 
362
376
  - **[ControlNet Guide](CONTROLNET_GUIDE.md)**: Comprehensive ControlNet usage and examples
363
- - **[LoRA Guide](LORA_GUIDE.md)**: LoRA management and best practices
364
- - **[API Reference](API_REFERENCE.md)**: Complete API documentation
365
377
  - **[Website Documentation](https://www.ollamadiffuser.com/)**: Complete tutorials and guides
366
378
 
367
379
  ## 🚀 Performance & Hardware
@@ -383,6 +395,43 @@ with open("control.jpg", "rb") as f:
383
395
 
384
396
  ## 🔧 Troubleshooting
385
397
 
398
+ ### Installation Issues
399
+
400
+ #### Missing Dependencies (cv2/OpenCV Error)
401
+ If you encounter `ModuleNotFoundError: No module named 'cv2'`, run:
402
+
403
+ ```bash
404
+ # Quick fix
405
+ pip install "opencv-python>=4.8.0"
406
+
407
+ # Or use the built-in verification tool
408
+ ollamadiffuser verify-deps
409
+
410
+ # Or install with all optional dependencies
411
+ pip install ollamadiffuser[full]
412
+ ```
413
+
414
+ #### Complete Dependency Check
415
+ ```bash
416
+ # Run comprehensive system diagnostics
417
+ ollamadiffuser doctor
418
+
419
+ # Verify and install missing dependencies interactively
420
+ ollamadiffuser verify-deps
421
+ ```
422
+
423
+ #### Clean Installation
424
+ If you're having persistent issues:
425
+
426
+ ```bash
427
+ # Uninstall and reinstall
428
+ pip uninstall ollamadiffuser
429
+ pip install --no-cache-dir ollamadiffuser[full]
430
+
431
+ # Verify installation
432
+ ollamadiffuser verify-deps
433
+ ```
434
+
386
435
  ### Common Issues
387
436
 
388
437
  #### Slow Startup
@@ -407,46 +456,49 @@ curl -X POST http://localhost:8000/api/controlnet/initialize
407
456
 
408
457
  #### Memory Issues
409
458
  ```bash
410
- # Use smaller image sizes
411
- ollamadiffuser generate "test" --width 512 --height 512
459
+ # Use smaller image sizes via API
460
+ curl -X POST http://localhost:8000/api/generate \
461
+ -H "Content-Type: application/json" \
462
+ -d '{"prompt": "test", "width": 512, "height": 512}' \
463
+ --output test.png
412
464
 
413
- # Enable CPU offloading (automatic)
414
- # Close other applications
465
+ # CPU offloading is automatic
466
+ # Close other applications to free memory
415
467
  # Use basic preprocessors instead of advanced ones
416
468
  ```
417
469
 
418
- ### Debug Mode
419
- ```bash
420
- # Enable verbose logging
421
- ollamadiffuser -v run model-name
470
+ ### Platform-Specific Issues
422
471
 
423
- # Check system information
424
- ollamadiffuser info
425
-
426
- # Validate installation
427
- ollamadiffuser doctor
472
+ #### macOS Apple Silicon
473
+ ```bash
474
+ # If you encounter OpenCV issues on Apple Silicon
475
+ pip uninstall opencv-python
476
+ pip install "opencv-python-headless>=4.8.0"
428
477
  ```
429
478
 
430
- ## 🤝 Contributing
431
-
432
- We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
479
+ #### Windows
480
+ ```bash
481
+ # If you encounter build errors
482
+ pip install --only-binary=all "opencv-python>=4.8.0"
483
+ ```
433
484
 
434
- ### Development Setup
485
+ #### Linux
435
486
  ```bash
436
- # Clone repository
437
- git clone https://github.com/yourusername/ollamadiffuser.git
438
- cd ollamadiffuser
487
+ # If you need system dependencies
488
+ sudo apt-get update
489
+ sudo apt-get install libgl1-mesa-glx libglib2.0-0
490
+ pip install "opencv-python>=4.8.0"
491
+ ```
439
492
 
440
- # Install in development mode
441
- pip install -e ".[dev]"
493
+ ### Debug Mode
494
+ ```bash
495
+ # Enable verbose logging
496
+ ollamadiffuser --verbose run model-name
497
+ ```
442
498
 
443
- # Run tests
444
- pytest tests/
499
+ ## 🤝 Contributing
445
500
 
446
- # Run linting
447
- flake8 ollamadiffuser/
448
- black ollamadiffuser/
449
- ```
501
+ We welcome contributions! Please check the GitHub repository for contribution guidelines.
450
502
 
451
503
  ## 🤝 Community & Support
452
504
 
@@ -476,9 +528,8 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
476
528
 
477
529
  ## 📞 Support
478
530
 
479
- - **Documentation**: [Full documentation](docs/)
480
- - **Issues**: [GitHub Issues](https://github.com/yourusername/ollamadiffuser/issues)
481
- - **Discussions**: [GitHub Discussions](https://github.com/yourusername/ollamadiffuser/discussions)
531
+ - **Issues**: [GitHub Issues](https://github.com/ollamadiffuser/ollamadiffuser/issues)
532
+ - **Discussions**: [GitHub Discussions](https://github.com/ollamadiffuser/ollamadiffuser/discussions)
482
533
 
483
534
  ---
484
535
 
@@ -4,6 +4,7 @@
4
4
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
5
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
6
6
 
7
+
7
8
  ## Local AI Image Generation with OllamaDiffuser
8
9
 
9
10
  **OllamaDiffuser** simplifies local deployment of **Stable Diffusion**, **FLUX.1**, and other AI image generation models. An intuitive **local SD** tool inspired by **Ollama's** simplicity - perfect for **local diffuser** workflows with CLI, web UI, and LoRA support.
@@ -43,7 +44,7 @@ curl -X POST http://localhost:8000/api/generate \
43
44
  ### Option 2: Development Installation
44
45
  ```bash
45
46
  # Clone the repository
46
- git clone https://github.com/yourusername/ollamadiffuser.git
47
+ git clone https://github.com/ollamadiffuser/ollamadiffuser.git
47
48
  cd ollamadiffuser
48
49
 
49
50
  # Install dependencies
@@ -52,17 +53,25 @@ pip install -e .
52
53
 
53
54
  ### Basic Usage
54
55
  ```bash
56
+ # Check version
57
+ ollamadiffuser -V
58
+
55
59
  # Install a model
56
60
  ollamadiffuser pull stable-diffusion-1.5
57
61
 
58
- # Load the model
59
- ollamadiffuser load stable-diffusion-1.5
62
+ # Run the model (loads and starts API server)
63
+ ollamadiffuser run stable-diffusion-1.5
60
64
 
61
- # Generate an image
62
- ollamadiffuser generate "a beautiful sunset over mountains"
65
+ # Generate an image via API
66
+ curl -X POST http://localhost:8000/api/generate \
67
+ -H "Content-Type: application/json" \
68
+ -d '{"prompt": "a beautiful sunset over mountains"}' \
69
+ --output image.png
63
70
 
64
71
  # Start web interface
65
72
  ollamadiffuser --mode ui
73
+
74
+ open http://localhost:8001
66
75
  ```
67
76
 
68
77
  ### ControlNet Quick Start
@@ -70,8 +79,8 @@ ollamadiffuser --mode ui
70
79
  # Install ControlNet model
71
80
  ollamadiffuser pull controlnet-canny-sd15
72
81
 
73
- # Load ControlNet model
74
- ollamadiffuser load controlnet-canny-sd15
82
+ # Run ControlNet model (loads and starts API server)
83
+ ollamadiffuser run controlnet-canny-sd15
75
84
 
76
85
  # Generate with control image
77
86
  curl -X POST http://localhost:8000/api/generate/controlnet \
@@ -153,21 +162,29 @@ ollamadiffuser lora unload
153
162
 
154
163
  ### Command Line Interface
155
164
  ```bash
156
- # Generate with advanced parameters
157
- ollamadiffuser generate \
158
- "a futuristic cityscape" \
159
- --negative-prompt "blurry, low quality" \
160
- --steps 30 \
161
- --guidance 7.5 \
162
- --width 1024 \
163
- --height 1024
165
+ # Pull and run a model
166
+ ollamadiffuser pull stable-diffusion-1.5
167
+ ollamadiffuser run stable-diffusion-1.5
168
+
169
+ # In another terminal, generate images via API
170
+ curl -X POST http://localhost:8000/api/generate \
171
+ -H "Content-Type: application/json" \
172
+ -d '{
173
+ "prompt": "a futuristic cityscape",
174
+ "negative_prompt": "blurry, low quality",
175
+ "num_inference_steps": 30,
176
+ "guidance_scale": 7.5,
177
+ "width": 1024,
178
+ "height": 1024
179
+ }' \
180
+ --output image.png
164
181
  ```
165
182
 
166
183
  ### Web UI
167
184
  ```bash
168
185
  # Start web interface
169
186
  ollamadiffuser --mode ui
170
- # Open http://localhost:8001
187
+ open http://localhost:8001
171
188
  ```
172
189
 
173
190
  Features:
@@ -181,10 +198,15 @@ Features:
181
198
  # Start API server
182
199
  ollamadiffuser --mode api
183
200
 
201
+ ollamadiffuser load stable-diffusion-1.5
202
+
184
203
  # Generate image
185
204
  curl -X POST http://localhost:8000/api/generate \
186
205
  -H "Content-Type: application/json" \
187
206
  -d '{"prompt": "a beautiful landscape", "width": 1024, "height": 1024}'
207
+
208
+ # API document
209
+ http://localhost:8000/docs
188
210
  ```
189
211
 
190
212
  ### Python API
@@ -192,16 +214,19 @@ curl -X POST http://localhost:8000/api/generate \
192
214
  from ollamadiffuser.core.models.manager import model_manager
193
215
 
194
216
  # Load model
195
- model_manager.load_model("stable-diffusion-1.5")
196
- engine = model_manager.loaded_model
197
-
198
- # Generate image
199
- image = engine.generate_image(
200
- prompt="a beautiful sunset",
201
- width=1024,
202
- height=1024
203
- )
204
- image.save("output.jpg")
217
+ success = model_manager.load_model("stable-diffusion-1.5")
218
+ if success:
219
+ engine = model_manager.loaded_model
220
+
221
+ # Generate image
222
+ image = engine.generate_image(
223
+ prompt="a beautiful sunset",
224
+ width=1024,
225
+ height=1024
226
+ )
227
+ image.save("output.jpg")
228
+ else:
229
+ print("Failed to load model")
205
230
  ```
206
231
 
207
232
  ## 📦 Supported Models
@@ -231,18 +256,6 @@ Models are automatically configured with optimal settings:
231
256
  - **Precision Handling**: FP16/BF16 support for efficiency
232
257
  - **Safety Features**: NSFW filter bypass for creative freedom
233
258
 
234
- ### Performance Tuning
235
- ```bash
236
- # Enable verbose logging
237
- ollamadiffuser -v generate "test prompt"
238
-
239
- # Check system status
240
- ollamadiffuser status
241
-
242
- # Monitor memory usage
243
- ollamadiffuser info
244
- ```
245
-
246
259
  ## 🔧 Advanced Usage
247
260
 
248
261
  ### ControlNet Parameters
@@ -265,8 +278,9 @@ from ollamadiffuser.core.utils.controlnet_preprocessors import controlnet_prepro
265
278
  controlnet_preprocessor.initialize()
266
279
 
267
280
  # Process multiple images
268
- for image_path in image_list:
269
- control_img = controlnet_preprocessor.preprocess(image, "canny")
281
+ prompt = "beautiful landscape" # Define the prompt
282
+ for i, image_path in enumerate(image_list):
283
+ control_img = controlnet_preprocessor.preprocess(image_path, "canny")
270
284
  result = engine.generate_image(prompt, control_image=control_img)
271
285
  result.save(f"output_{i}.jpg")
272
286
  ```
@@ -294,8 +308,6 @@ with open("control.jpg", "rb") as f:
294
308
  ## 📚 Documentation & Guides
295
309
 
296
310
  - **[ControlNet Guide](CONTROLNET_GUIDE.md)**: Comprehensive ControlNet usage and examples
297
- - **[LoRA Guide](LORA_GUIDE.md)**: LoRA management and best practices
298
- - **[API Reference](API_REFERENCE.md)**: Complete API documentation
299
311
  - **[Website Documentation](https://www.ollamadiffuser.com/)**: Complete tutorials and guides
300
312
 
301
313
  ## 🚀 Performance & Hardware
@@ -317,6 +329,43 @@ with open("control.jpg", "rb") as f:
317
329
 
318
330
  ## 🔧 Troubleshooting
319
331
 
332
+ ### Installation Issues
333
+
334
+ #### Missing Dependencies (cv2/OpenCV Error)
335
+ If you encounter `ModuleNotFoundError: No module named 'cv2'`, run:
336
+
337
+ ```bash
338
+ # Quick fix
339
+ pip install "opencv-python>=4.8.0"
340
+
341
+ # Or use the built-in verification tool
342
+ ollamadiffuser verify-deps
343
+
344
+ # Or install with all optional dependencies
345
+ pip install ollamadiffuser[full]
346
+ ```
347
+
348
+ #### Complete Dependency Check
349
+ ```bash
350
+ # Run comprehensive system diagnostics
351
+ ollamadiffuser doctor
352
+
353
+ # Verify and install missing dependencies interactively
354
+ ollamadiffuser verify-deps
355
+ ```
356
+
357
+ #### Clean Installation
358
+ If you're having persistent issues:
359
+
360
+ ```bash
361
+ # Uninstall and reinstall
362
+ pip uninstall ollamadiffuser
363
+ pip install --no-cache-dir ollamadiffuser[full]
364
+
365
+ # Verify installation
366
+ ollamadiffuser verify-deps
367
+ ```
368
+
320
369
  ### Common Issues
321
370
 
322
371
  #### Slow Startup
@@ -341,46 +390,49 @@ curl -X POST http://localhost:8000/api/controlnet/initialize
341
390
 
342
391
  #### Memory Issues
343
392
  ```bash
344
- # Use smaller image sizes
345
- ollamadiffuser generate "test" --width 512 --height 512
393
+ # Use smaller image sizes via API
394
+ curl -X POST http://localhost:8000/api/generate \
395
+ -H "Content-Type: application/json" \
396
+ -d '{"prompt": "test", "width": 512, "height": 512}' \
397
+ --output test.png
346
398
 
347
- # Enable CPU offloading (automatic)
348
- # Close other applications
399
+ # CPU offloading is automatic
400
+ # Close other applications to free memory
349
401
  # Use basic preprocessors instead of advanced ones
350
402
  ```
351
403
 
352
- ### Debug Mode
353
- ```bash
354
- # Enable verbose logging
355
- ollamadiffuser -v run model-name
404
+ ### Platform-Specific Issues
356
405
 
357
- # Check system information
358
- ollamadiffuser info
359
-
360
- # Validate installation
361
- ollamadiffuser doctor
406
+ #### macOS Apple Silicon
407
+ ```bash
408
+ # If you encounter OpenCV issues on Apple Silicon
409
+ pip uninstall opencv-python
410
+ pip install "opencv-python-headless>=4.8.0"
362
411
  ```
363
412
 
364
- ## 🤝 Contributing
365
-
366
- We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
413
+ #### Windows
414
+ ```bash
415
+ # If you encounter build errors
416
+ pip install --only-binary=all "opencv-python>=4.8.0"
417
+ ```
367
418
 
368
- ### Development Setup
419
+ #### Linux
369
420
  ```bash
370
- # Clone repository
371
- git clone https://github.com/yourusername/ollamadiffuser.git
372
- cd ollamadiffuser
421
+ # If you need system dependencies
422
+ sudo apt-get update
423
+ sudo apt-get install libgl1-mesa-glx libglib2.0-0
424
+ pip install "opencv-python>=4.8.0"
425
+ ```
373
426
 
374
- # Install in development mode
375
- pip install -e ".[dev]"
427
+ ### Debug Mode
428
+ ```bash
429
+ # Enable verbose logging
430
+ ollamadiffuser --verbose run model-name
431
+ ```
376
432
 
377
- # Run tests
378
- pytest tests/
433
+ ## 🤝 Contributing
379
434
 
380
- # Run linting
381
- flake8 ollamadiffuser/
382
- black ollamadiffuser/
383
- ```
435
+ We welcome contributions! Please check the GitHub repository for contribution guidelines.
384
436
 
385
437
  ## 🤝 Community & Support
386
438
 
@@ -410,9 +462,8 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file
410
462
 
411
463
  ## 📞 Support
412
464
 
413
- - **Documentation**: [Full documentation](docs/)
414
- - **Issues**: [GitHub Issues](https://github.com/yourusername/ollamadiffuser/issues)
415
- - **Discussions**: [GitHub Discussions](https://github.com/yourusername/ollamadiffuser/discussions)
465
+ - **Issues**: [GitHub Issues](https://github.com/ollamadiffuser/ollamadiffuser/issues)
466
+ - **Discussions**: [GitHub Discussions](https://github.com/ollamadiffuser/ollamadiffuser/discussions)
416
467
 
417
468
  ---
418
469
 
@@ -0,0 +1,31 @@
1
+ """
2
+ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
3
+
4
+ A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
5
+ """
6
+
7
+ __version__ = "1.1.2"
8
+ __author__ = "OllamaDiffuser Team"
9
+ __email__ = "ollamadiffuser@gmail.com"
10
+ __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
11
+ __url__ = "https://www.ollamadiffuser.com/"
12
+ __repository__ = "https://github.com/ollamadiffuser/ollamadiffuser"
13
+
14
+ def get_version_info():
15
+ """Get formatted version information"""
16
+ return {
17
+ "version": __version__,
18
+ "description": __description__,
19
+ "url": __url__,
20
+ "repository": __repository__
21
+ }
22
+
23
+ def print_version():
24
+ """Print formatted version information"""
25
+ from rich import print as rprint
26
+ rprint(f"[bold cyan]OllamaDiffuser v{__version__}[/bold cyan]")
27
+ rprint(__description__)
28
+ rprint(f"🔗 {__url__}")
29
+
30
+ # For backward compatibility
31
+ __all__ = ["__version__", "__author__", "__email__", "__description__", "__url__", "__repository__", "get_version_info", "print_version"]
@@ -0,0 +1,9 @@
1
+ #!/usr/bin/env python3
2
+ from .cli.main import cli
3
+
4
+ def main():
5
+ """Main entry function"""
6
+ cli()
7
+
8
+ if __name__ == '__main__':
9
+ main()