ollamadiffuser-1.1.0-py3-none-any.whl → ollamadiffuser-1.1.2-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
ollamadiffuser/__init__.py
@@ -0,0 +1,31 @@
+ """
+ OllamaDiffuser - Local AI Image Generation with Ollama-style CLI
+
+ A tool for managing and running Stable Diffusion, FLUX.1, and other AI image generation models locally.
+ """
+
+ __version__ = "1.1.1"
+ __author__ = "OllamaDiffuser Team"
+ __email__ = "ollamadiffuser@gmail.com"
+ __description__ = "🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support"
+ __url__ = "https://www.ollamadiffuser.com/"
+ __repository__ = "https://github.com/ollamadiffuser/ollamadiffuser"
+
+ def get_version_info():
+     """Get formatted version information"""
+     return {
+         "version": __version__,
+         "description": __description__,
+         "url": __url__,
+         "repository": __repository__
+     }
+
+ def print_version():
+     """Print formatted version information"""
+     from rich import print as rprint
+     rprint(f"[bold cyan]OllamaDiffuser v{__version__}[/bold cyan]")
+     rprint(__description__)
+     rprint(f"🔗 {__url__}")
+
+ # For backward compatibility
+ __all__ = ["__version__", "__author__", "__email__", "__description__", "__url__", "__repository__", "get_version_info", "print_version"]
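For reference, a quick sketch of how these new module-level helpers can be used; it assumes the package is importable as `ollamadiffuser`, as the RECORD below suggests:

```python
from ollamadiffuser import __version__, get_version_info, print_version

print(__version__)           # version string, e.g. "1.1.1"
info = get_version_info()    # dict with keys: version, description, url, repository
print(info["repository"])    # https://github.com/ollamadiffuser/ollamadiffuser
print_version()              # Rich-formatted banner with version and URL
```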
ollamadiffuser/__main__.py
@@ -1,50 +1,9 @@
  #!/usr/bin/env python3
- import sys
- import argparse
  from .cli.main import cli
- from .api.server import run_server
- from .ui.web import create_ui_app
- from .core.config.settings import settings

  def main():
      """Main entry function"""
-     # Check if first argument is a mode flag
-     if len(sys.argv) > 1 and sys.argv[1] in ['--mode']:
-         # Use argparse for mode selection
-         parser = argparse.ArgumentParser(
-             description='OllamaDiffuser - Image generation model management tool'
-         )
-         parser.add_argument(
-             '--mode',
-             choices=['cli', 'api', 'ui'],
-             required=True,
-             help='Running mode: cli (command line), api (API server), ui (Web interface)'
-         )
-         parser.add_argument('--host', default=None, help='Server host address')
-         parser.add_argument('--port', type=int, default=None, help='Server port')
-         parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
-
-         args, unknown = parser.parse_known_args()
-
-         if args.mode == 'cli':
-             # Command line mode
-             sys.argv = [sys.argv[0]] + unknown
-             cli()
-         elif args.mode == 'api':
-             # API server mode
-             print("Starting OllamaDiffuser API server...")
-             run_server(host=args.host, port=args.port)
-         elif args.mode == 'ui':
-             # Web UI mode
-             print("Starting OllamaDiffuser Web UI...")
-             import uvicorn
-             app = create_ui_app()
-             host = args.host or settings.server.host
-             port = args.port or (settings.server.port + 1)  # Web UI uses different port
-             uvicorn.run(app, host=host, port=port)
-         else:
-             # Default to CLI mode for direct command usage
-             cli()
+     cli()

  if __name__ == '__main__':
      main()
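After this simplification the entry point is a thin wrapper: mode selection moves entirely into the Click group (see the `cli/main.py` hunks below). A hedged equivalence sketch:

```python
# Both invocations below now land in the same Click group defined in cli/main.py:
#
#   python -m ollamadiffuser --mode api              # mode flag, now a group option
#   python -m ollamadiffuser pull stable-diffusion-1.5   # ordinary subcommand
#
from ollamadiffuser.__main__ import main

if __name__ == "__main__":
    main()  # parses sys.argv via Click and dispatches
```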
ollamadiffuser/cli/commands.py
@@ -0,0 +1,133 @@
+ import click
+ import subprocess
+ import sys
+ from rich.console import Console
+ from rich.table import Table
+ from rich.panel import Panel
+ from ..core.models.manager import model_manager
+ from ..core.config.settings import settings
+
+ console = Console()
+
+ @click.command()
+ def verify_deps():
+     """Verify and install missing dependencies"""
+     console.print("\n🔍 [bold blue]Checking OllamaDiffuser Dependencies[/bold blue]")
+
+     # Check critical dependencies
+     deps_status = {}
+
+     # OpenCV check
+     try:
+         import cv2
+         deps_status['opencv-python'] = f"✅ Installed (v{cv2.__version__})"
+     except ImportError:
+         deps_status['opencv-python'] = "❌ Missing"
+
+     # ControlNet Aux check
+     try:
+         import controlnet_aux
+         deps_status['controlnet-aux'] = "✅ Installed"
+     except ImportError:
+         deps_status['controlnet-aux'] = "❌ Missing"
+
+     # Torch check
+     try:
+         import torch
+         deps_status['torch'] = f"✅ Installed (v{torch.__version__})"
+     except ImportError:
+         deps_status['torch'] = "❌ Missing"
+
+     # Diffusers check
+     try:
+         import diffusers
+         deps_status['diffusers'] = f"✅ Installed (v{diffusers.__version__})"
+     except ImportError:
+         deps_status['diffusers'] = "❌ Missing"
+
+     # Create status table
+     table = Table(title="Dependency Status")
+     table.add_column("Package", style="cyan")
+     table.add_column("Status", style="white")
+
+     missing_deps = []
+     for dep, status in deps_status.items():
+         table.add_row(dep, status)
+         if "❌ Missing" in status:
+             missing_deps.append(dep)
+
+     console.print(table)
+
+     if missing_deps:
+         console.print(f"\n⚠️ [bold yellow]{len(missing_deps)} dependencies are missing[/bold yellow]")
+
+         if click.confirm("\nWould you like to install missing dependencies?"):
+             for dep in missing_deps:
+                 console.print(f"\n📦 Installing {dep}...")
+
+                 # Determine package name
+                 if dep == 'opencv-python':
+                     package = 'opencv-python>=4.8.0'
+                 elif dep == 'controlnet-aux':
+                     package = 'controlnet-aux>=0.0.7'
+                 else:
+                     package = dep
+
+                 try:
+                     subprocess.check_call([
+                         sys.executable, "-m", "pip", "install", package
+                     ])
+                     console.print(f"✅ {dep} installed successfully")
+                 except subprocess.CalledProcessError as e:
+                     console.print(f"❌ Failed to install {dep}: {e}")
+
+             console.print("\n🔄 Re-run 'ollamadiffuser verify-deps' to check status")
+     else:
+         console.print("\n🎉 [bold green]All dependencies are installed![/bold green]")
+
+     # Check ControlNet preprocessors
+     console.print("\n🔧 [bold blue]Testing ControlNet Preprocessors[/bold blue]")
+     try:
+         from ..core.utils.controlnet_preprocessors import controlnet_preprocessor
+         if controlnet_preprocessor.is_available():
+             available_types = controlnet_preprocessor.get_available_types()
+             console.print(f"✅ Available types: {', '.join(available_types)}")
+         else:
+             console.print("⚠️ ControlNet preprocessors not fully available")
+     except Exception as e:
+         console.print(f"❌ Error testing preprocessors: {e}")
+
+ @click.command()
+ def doctor():
+     """Run comprehensive system diagnostics"""
+     console.print(Panel.fit("🩺 [bold blue]OllamaDiffuser Doctor[/bold blue]"))
+
+     # System info
+     import platform
+     console.print(f"\n💻 System: {platform.system()} {platform.release()}")
+     console.print(f"🐍 Python: {sys.version.split()[0]}")
+
+     # GPU info
+     try:
+         import torch
+         if torch.cuda.is_available():
+             console.print(f"🎮 CUDA: Available ({torch.cuda.get_device_name()})")
+         elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+             console.print("🍎 Apple Metal: Available")
+         else:
+             console.print("⚙️ GPU: CPU only")
+     except ImportError:
+         console.print("❌ PyTorch not installed")
+
+     # Memory info
+     try:
+         import psutil
+         memory = psutil.virtual_memory()
+         console.print(f"🧠 RAM: {memory.total // (1024**3)} GB total, {memory.available // (1024**3)} GB available")
+     except ImportError:
+         console.print("⚠️ Cannot check memory (psutil missing)")
+
+     # Run dependency check
+     console.print("\n" + "="*50)
+     ctx = click.Context(verify_deps)
+     ctx.invoke(verify_deps)
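Since both commands are plain Click commands, they can be exercised in isolation (for instance in tests) via Click's test runner. A hedged sketch:

```python
from click.testing import CliRunner
from ollamadiffuser.cli.commands import doctor

runner = CliRunner()
# Feed "n" in case the embedded verify_deps confirmation prompt fires.
result = runner.invoke(doctor, input="n\n")
print(result.exit_code)
print(result.output)
```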
ollamadiffuser/cli/main.py
@@ -5,23 +5,96 @@ import logging
  from typing import Optional
  from rich.console import Console
  from rich.table import Table
- from rich.progress import Progress, SpinnerColumn, TextColumn
+ from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, DownloadColumn, TransferSpeedColumn, TimeRemainingColumn
  from rich import print as rprint
+ import time

+ from .. import __version__, print_version
  from ..core.models.manager import model_manager
  from ..core.config.settings import settings
  from ..api.server import run_server

  console = Console()

- @click.group()
+ class OllamaStyleProgress:
+     """Enhanced progress tracker that mimics Ollama's progress display"""
+
+     def __init__(self, console: Console):
+         self.console = console
+         self.last_message = ""
+
+     def update(self, message: str):
+         """Update progress with a message"""
+         # Skip duplicate messages
+         if message == self.last_message:
+             return
+
+         self.last_message = message
+
+         # Handle different types of messages
+         if message.startswith("pulling ") and ":" in message and "%" in message:
+             # This is a file progress message from download_utils
+             # Format: "pulling e6a7edc1a4d7: 12% ▕██                  ▏ 617 MB/5200 MB 44 MB/s 1m44s"
+             self.console.print(message)
+         elif message.startswith("pulling manifest"):
+             self.console.print(message)
+         elif message.startswith("📦 Repository:"):
+             # Repository info
+             self.console.print(f"[dim]{message}[/dim]")
+         elif message.startswith("📁 Found"):
+             # Existing files info
+             self.console.print(f"[dim]{message}[/dim]")
+         elif message.startswith("✅") and "download completed" in message:
+             self.console.print(f"[green]{message}[/green]")
+         elif message.startswith("❌"):
+             self.console.print(f"[red]{message}[/red]")
+         elif message.startswith("⚠️"):
+             self.console.print(f"[yellow]{message}[/yellow]")
+         else:
+             # For other messages, print with dimmed style
+             self.console.print(f"[dim]{message}[/dim]")
+
+ @click.group(invoke_without_command=True)
  @click.option('--verbose', '-v', is_flag=True, help='Enable verbose output')
- def cli(verbose):
+ @click.option('--version', '-V', is_flag=True, help='Show version and exit')
+ @click.option('--mode', type=click.Choice(['cli', 'api', 'ui']), help='Running mode: cli (command line), api (API server), ui (Web interface)')
+ @click.option('--host', default=None, help='Server host address (for api/ui modes)')
+ @click.option('--port', type=int, default=None, help='Server port (for api/ui modes)')
+ @click.pass_context
+ def cli(ctx, verbose, version, mode, host, port):
      """OllamaDiffuser - Image generation model management tool"""
+     if version:
+         print_version()
+         sys.exit(0)
+
      if verbose:
          logging.basicConfig(level=logging.DEBUG)
      else:
          logging.basicConfig(level=logging.WARNING)
+
+     # Handle mode-based execution
+     if mode:
+         if mode == 'api':
+             rprint("[blue]Starting OllamaDiffuser API server...[/blue]")
+             run_server(host=host, port=port)
+             sys.exit(0)
+         elif mode == 'ui':
+             rprint("[blue]Starting OllamaDiffuser Web UI...[/blue]")
+             import uvicorn
+             from ..ui.web import create_ui_app
+             app = create_ui_app()
+             ui_host = host or settings.server.host
+             ui_port = port or (settings.server.port + 1)  # Web UI uses different port
+             uvicorn.run(app, host=ui_host, port=ui_port)
+             sys.exit(0)
+         elif mode == 'cli':
+             # Continue with normal CLI processing
+             pass
+
+     # If no subcommand is provided and no mode/version flag, show help
+     if ctx.invoked_subcommand is None and not version and not mode:
+         rprint(ctx.get_help())
+         sys.exit(0)

  @cli.command()
  @click.argument('model_name')
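The `invoke_without_command=True` pattern above is what lets a bare `ollamadiffuser` invocation fall through to help text instead of erroring. A minimal standalone sketch of the same Click pattern (the `demo` group is hypothetical, not part of the package):

```python
import click

@click.group(invoke_without_command=True)
@click.pass_context
def demo(ctx):
    # With no subcommand, Click still runs this body; mirror the
    # fallback above by printing the group's own help text.
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())

@demo.command()
def hello():
    click.echo("hello")

if __name__ == "__main__":
    demo()
```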
@@ -30,24 +103,26 @@ def pull(model_name: str, force: bool):
      """Download model"""
      rprint(f"[blue]Downloading model: {model_name}[/blue]")

-     with Progress(
-         SpinnerColumn(),
-         TextColumn("[progress.description]{task.description}"),
-         console=console
-     ) as progress:
-         task = progress.add_task(f"Downloading {model_name}...", total=None)
-
-         def progress_callback(message: str):
-             """Update progress display with download status"""
-             progress.update(task, description=message)
-
+     # Use the new Ollama-style progress tracker
+     progress_tracker = OllamaStyleProgress(console)
+
+     def progress_callback(message: str):
+         """Enhanced progress callback with Ollama-style display"""
+         progress_tracker.update(message)
+
+     try:
          if model_manager.pull_model(model_name, force=force, progress_callback=progress_callback):
-             progress.update(task, description=f"✅ {model_name} download completed")
+             progress_tracker.update("✅ download completed")
              rprint(f"[green]Model {model_name} downloaded successfully![/green]")
          else:
-             progress.update(task, description=f"❌ {model_name} download failed")
              rprint(f"[red]Model {model_name} download failed![/red]")
              sys.exit(1)
+     except KeyboardInterrupt:
+         rprint("\n[yellow]Download cancelled by user[/yellow]")
+         sys.exit(1)
+     except Exception as e:
+         rprint(f"[red]Download failed: {str(e)}[/red]")
+         sys.exit(1)

  @cli.command()
  @click.argument('model_name')
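To see how the tracker above routes messages, a quick hedged demo (assumes `OllamaStyleProgress` is importable from `ollamadiffuser.cli.main`):

```python
from rich.console import Console
from ollamadiffuser.cli.main import OllamaStyleProgress

tracker = OllamaStyleProgress(Console())
tracker.update("pulling manifest")                        # printed as-is
tracker.update("📦 Repository: 15 files, 5.2 GB total")    # dimmed
tracker.update("✅ download completed")                    # green
tracker.update("✅ download completed")                    # duplicate, suppressed
```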
@@ -849,8 +924,39 @@ def show(lora_name: str):
  @cli.command()
  def version():
      """Show version information"""
-     rprint("[bold cyan]OllamaDiffuser v1.0.0[/bold cyan]")
-     rprint("Image generation model management tool")
+     print_version()
+     rprint("\n[bold]Features:[/bold]")
+     rprint("• 🚀 Fast Startup with lazy loading architecture")
+     rprint("• 🎛️ ControlNet Support with 10+ control types")
+     rprint("• 🔄 LoRA Integration with dynamic loading")
+     rprint("• 🌐 Multiple Interfaces: CLI, Python API, Web UI, REST API")
+     rprint("• 📦 Easy model management and switching")
+     rprint("• ⚡ Performance optimized with GPU acceleration")
+
+     rprint("\n[bold]Supported Models:[/bold]")
+     rprint("• FLUX.1-schnell (Apache 2.0, Commercial OK, 4-step generation)")
+     rprint("• FLUX.1-dev (Non-commercial, High quality, 50-step generation)")
+     rprint("• Stable Diffusion 3.5 Medium")
+     rprint("• Stable Diffusion XL Base")
+     rprint("• Stable Diffusion 1.5")
+     rprint("• ControlNet models for SD15 and SDXL")
+
+     rprint("\n[dim]For help: ollamadiffuser --help[/dim]")
+     rprint("[dim]For diagnostics: ollamadiffuser doctor[/dim]")
+
+ @cli.command(name='verify-deps')
+ def verify_deps_cmd():
+     """Verify and install missing dependencies"""
+     from .commands import verify_deps
+     ctx = click.Context(verify_deps)
+     ctx.invoke(verify_deps)
+
+ @cli.command()
+ def doctor():
+     """Run comprehensive system diagnostics"""
+     from .commands import doctor
+     ctx = click.Context(doctor)
+     ctx.invoke(doctor)

  if __name__ == '__main__':
      cli()
ollamadiffuser/core/utils/controlnet_preprocessors.py
@@ -1,4 +1,10 @@
- import cv2
+ try:
+     import cv2
+     CV2_AVAILABLE = True
+ except ImportError:
+     CV2_AVAILABLE = False
+     cv2 = None
+
  import numpy as np
  from PIL import Image
  import logging
@@ -15,6 +21,13 @@ class ControlNetPreprocessorManager:
          self._initialized = False
          self._initialization_attempted = False
          self._available_types = []
+
+         # Check dependencies on initialization
+         if not CV2_AVAILABLE:
+             logger.warning(
+                 "OpenCV (cv2) is not installed. "
+                 "Install it with: pip install opencv-python>=4.8.0"
+             )

      def is_initialized(self) -> bool:
          """Check if preprocessors are initialized"""
@@ -22,6 +35,9 @@ class ControlNetPreprocessorManager:

      def is_available(self) -> bool:
          """Check if ControlNet preprocessors are available"""
+         if not CV2_AVAILABLE:
+             return False
+
          if not self._initialization_attempted:
              # Try a lightweight check without full initialization
              try:
@@ -41,6 +57,12 @@ class ControlNetPreprocessorManager:
          Returns:
              True if initialization successful, False otherwise
          """
+         if not CV2_AVAILABLE:
+             raise ImportError(
+                 "OpenCV (cv2) is required for ControlNet preprocessors. "
+                 "Install it with: pip install opencv-python>=4.8.0"
+             )
+
          if self._initialized and not force:
              return True

@@ -164,17 +186,28 @@ class ControlNetPreprocessorManager:

      def _init_basic_processors(self):
          """Initialize basic OpenCV-based processors as fallback"""
-         logger.info("Using basic OpenCV-based preprocessors")
-         self.processors = {
-             'canny': self._canny_opencv,
-             'depth': self._depth_basic,
-             'scribble': self._scribble_basic,
-         }
+         if not CV2_AVAILABLE:
+             logger.warning("OpenCV not available, using minimal fallback processors")
+             self.processors = {
+                 'canny': self._simple_edge_fallback,
+                 'depth': self._simple_depth_fallback,
+                 'scribble': self._simple_edge_fallback,
+             }
+         else:
+             logger.info("Using basic OpenCV-based preprocessors")
+             self.processors = {
+                 'canny': self._canny_opencv,
+                 'depth': self._depth_basic,
+                 'scribble': self._scribble_basic,
+             }
          self._initialized = True
          self._available_types = list(self.processors.keys())

      def _canny_opencv(self, image: Image.Image, low_threshold: int = 100, high_threshold: int = 200) -> Image.Image:
          """Basic Canny edge detection using OpenCV"""
+         if not CV2_AVAILABLE:
+             raise ImportError("OpenCV is required for Canny edge detection")
+
          # Convert PIL to OpenCV format
          image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

@@ -187,6 +220,9 @@ class ControlNetPreprocessorManager:

      def _depth_basic(self, image: Image.Image) -> Image.Image:
          """Basic depth estimation using simple gradients"""
+         if not CV2_AVAILABLE:
+             raise ImportError("OpenCV is required for depth estimation")
+
          # Convert to grayscale
          gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)

@@ -207,6 +243,9 @@ class ControlNetPreprocessorManager:

      def _scribble_basic(self, image: Image.Image) -> Image.Image:
          """Basic scribble detection using edge detection"""
+         if not CV2_AVAILABLE:
+             raise ImportError("OpenCV is required for scribble detection")
+
          # Convert to grayscale
          gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)

@@ -221,6 +260,39 @@ class ControlNetPreprocessorManager:
          edges_rgb = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
          return Image.fromarray(edges_rgb)

+     def _simple_edge_fallback(self, image: Image.Image, **kwargs) -> Image.Image:
+         """Simple edge detection fallback using PIL/numpy only"""
+         # Convert to numpy array
+         img_array = np.array(image.convert('L'))  # Convert to grayscale
+
+         # Simple gradient-based edge detection
+         grad_x = np.abs(np.diff(img_array, axis=1))
+         grad_y = np.abs(np.diff(img_array, axis=0))
+
+         # Pad to maintain original size
+         grad_x = np.pad(grad_x, ((0, 0), (0, 1)), mode='edge')
+         grad_y = np.pad(grad_y, ((0, 1), (0, 0)), mode='edge')
+
+         # Combine gradients
+         edges = np.sqrt(grad_x**2 + grad_y**2)
+         edges = np.uint8(255 * edges / np.max(edges) if np.max(edges) > 0 else edges)
+
+         # Convert to RGB
+         edges_rgb = np.stack([edges, edges, edges], axis=2)
+         return Image.fromarray(edges_rgb)
+
+     def _simple_depth_fallback(self, image: Image.Image, **kwargs) -> Image.Image:
+         """Simple depth estimation fallback using PIL/numpy only"""
+         # Convert to grayscale
+         gray = np.array(image.convert('L'))
+
+         # Simple depth based on intensity (brighter = closer)
+         depth = 255 - gray  # Invert so darker areas are "further"
+
+         # Convert to RGB
+         depth_rgb = np.stack([depth, depth, depth], axis=2)
+         return Image.fromarray(depth_rgb)
+
      def preprocess(self,
                     image: Union[Image.Image, str],
                     control_type: str,
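The numpy-only fallback boils down to first-difference gradient magnitudes. Restated as a self-contained function (same logic as `_simple_edge_fallback` above, minus the class plumbing):

```python
import numpy as np
from PIL import Image

def simple_edges(image: Image.Image) -> Image.Image:
    """Gradient-magnitude edge map without OpenCV."""
    gray = np.array(image.convert("L")).astype(np.float64)
    # Horizontal/vertical first differences, padded back to the original size
    gx = np.pad(np.abs(np.diff(gray, axis=1)), ((0, 0), (0, 1)), mode="edge")
    gy = np.pad(np.abs(np.diff(gray, axis=0)), ((0, 1), (0, 0)), mode="edge")
    edges = np.sqrt(gx ** 2 + gy ** 2)
    if edges.max() > 0:
        edges = 255 * edges / edges.max()   # normalize to 0-255
    edges = edges.astype(np.uint8)
    # Stack to 3 channels, as ControlNet expects an RGB control image
    return Image.fromarray(np.stack([edges] * 3, axis=2))
```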
ollamadiffuser/core/utils/download_utils.py
@@ -11,25 +11,107 @@ from pathlib import Path
  from huggingface_hub import snapshot_download, hf_hub_download, HfApi
  from tqdm import tqdm
  import threading
+ import requests

  logger = logging.getLogger(__name__)

- class ProgressTracker:
-     """Track download progress across multiple files"""
+ class EnhancedProgressTracker:
+     """Enhanced progress tracker that provides Ollama-style detailed progress information"""

      def __init__(self, total_files: int = 0, progress_callback: Optional[Callable] = None):
          self.total_files = total_files
          self.completed_files = 0
          self.current_file = ""
          self.file_progress = {}
+         self.file_start_times = {}
+         self.file_speeds = {}
          self.progress_callback = progress_callback
          self.lock = threading.Lock()
+         self.overall_start_time = time.time()
+         self.total_size = 0
+         self.downloaded_size = 0

+     def set_total_size(self, total_size: int):
+         """Set the total size for all files"""
+         with self.lock:
+             self.total_size = total_size
+
+     def start_file(self, filename: str, file_size: int = 0):
+         """Mark a file as started"""
+         with self.lock:
+             self.current_file = filename
+             self.file_start_times[filename] = time.time()
+             self.file_progress[filename] = (0, file_size)
+
+             # Extract hash-like identifier for Ollama-style display
+             import re
+             hash_match = re.search(r'([a-f0-9]{8,})', filename)
+             if hash_match:
+                 display_name = hash_match.group(1)[:12]  # First 12 characters
+             else:
+                 # Fallback to filename without extension
+                 display_name = Path(filename).stem[:12]
+
+             if self.progress_callback:
+                 self.progress_callback(f"pulling {display_name}")
+
      def update_file_progress(self, filename: str, downloaded: int, total: int):
-         """Update progress for a specific file"""
+         """Update progress for a specific file with speed calculation"""
          with self.lock:
+             current_time = time.time()
+
+             # Update file progress
+             old_downloaded = self.file_progress.get(filename, (0, 0))[0]
              self.file_progress[filename] = (downloaded, total)
-             self._report_progress()
+
+             # Update overall downloaded size
+             size_diff = downloaded - old_downloaded
+             self.downloaded_size += size_diff
+
+             # Calculate speed for this file
+             if filename in self.file_start_times:
+                 elapsed = current_time - self.file_start_times[filename]
+                 if elapsed > 0 and downloaded > 0:
+                     speed = downloaded / elapsed  # bytes per second
+                     self.file_speeds[filename] = speed
+
+             # Report progress in Ollama style
+             if self.progress_callback and total > 0:
+                 percentage = (downloaded / total) * 100
+
+                 # Format sizes
+                 downloaded_mb = downloaded / (1024 * 1024)
+                 total_mb = total / (1024 * 1024)
+
+                 # Calculate speed in MB/s
+                 speed_mbps = self.file_speeds.get(filename, 0) / (1024 * 1024)
+
+                 # Calculate ETA
+                 if speed_mbps > 0:
+                     remaining_mb = total_mb - downloaded_mb
+                     eta_seconds = remaining_mb / speed_mbps
+                     eta_min = int(eta_seconds // 60)
+                     eta_sec = int(eta_seconds % 60)
+                     eta_str = f"{eta_min}m{eta_sec:02d}s"
+                 else:
+                     eta_str = "?"
+
+                 # Extract hash for display
+                 import re
+                 hash_match = re.search(r'([a-f0-9]{8,})', filename)
+                 if hash_match:
+                     display_name = hash_match.group(1)[:12]
+                 else:
+                     display_name = Path(filename).stem[:12]
+
+                 # Create progress bar
+                 bar_width = 20
+                 filled = int((percentage / 100) * bar_width)
+                 bar = "█" * filled + " " * (bar_width - filled)
+
+                 progress_msg = f"pulling {display_name}: {percentage:3.0f}% ▕{bar}▏ {downloaded_mb:.0f} MB/{total_mb:.0f} MB {speed_mbps:.0f} MB/s {eta_str}"
+
+                 self.progress_callback(progress_msg)

      def complete_file(self, filename: str):
          """Mark a file as completed"""
@@ -38,34 +120,42 @@ class ProgressTracker:
          if filename in self.file_progress:
              downloaded, total = self.file_progress[filename]
              self.file_progress[filename] = (total, total)
-             self._report_progress()
-
-     def set_current_file(self, filename: str):
-         """Set the currently downloading file"""
-         with self.lock:
-             self.current_file = filename
-             self._report_progress()
+
+             # Report completion
+             if self.progress_callback:
+                 import re
+                 hash_match = re.search(r'([a-f0-9]{8,})', filename)
+                 if hash_match:
+                     display_name = hash_match.group(1)[:12]
+                 else:
+                     display_name = Path(filename).stem[:12]
+
+                 total_mb = self.file_progress.get(filename, (0, 0))[1] / (1024 * 1024)
+                 self.progress_callback(f"pulling {display_name}: 100% ▕████████████████████▏ {total_mb:.0f} MB/{total_mb:.0f} MB")

-     def _report_progress(self):
-         """Report current progress"""
+     def report_overall_progress(self):
+         """Report overall progress"""
          if self.progress_callback:
-             # Calculate overall progress
-             total_downloaded = 0
-             total_size = 0
-
-             for downloaded, size in self.file_progress.values():
-                 total_downloaded += downloaded
-                 total_size += size
-
-             progress_msg = f"Files: {self.completed_files}/{self.total_files}"
-             if total_size > 0:
-                 percent = (total_downloaded / total_size) * 100
-                 progress_msg += f" | Overall: {percent:.1f}%"
-
-             if self.current_file:
-                 progress_msg += f" | Current: {self.current_file}"
-
-             self.progress_callback(progress_msg)
+             if self.total_size > 0:
+                 overall_percent = (self.downloaded_size / self.total_size) * 100
+                 downloaded_gb = self.downloaded_size / (1024 * 1024 * 1024)
+                 total_gb = self.total_size / (1024 * 1024 * 1024)
+
+                 elapsed = time.time() - self.overall_start_time
+                 if elapsed > 0:
+                     overall_speed = self.downloaded_size / elapsed / (1024 * 1024)  # MB/s
+
+                     if overall_speed > 0:
+                         remaining_gb = total_gb - downloaded_gb
+                         eta_seconds = (remaining_gb * 1024) / overall_speed  # Convert GB to MB for calculation
+                         eta_min = int(eta_seconds // 60)
+                         eta_sec = int(eta_seconds % 60)
+                         eta_str = f"{eta_min}m{eta_sec:02d}s"
+                     else:
+                         eta_str = "?"
+
+                     progress_msg = f"Overall progress: {overall_percent:.1f}% | {downloaded_gb:.1f} GB/{total_gb:.1f} GB | {overall_speed:.1f} MB/s | ETA: {eta_str}"
+                     self.progress_callback(progress_msg)

  def configure_hf_environment():
      """Configure HuggingFace Hub environment for better downloads"""
@@ -132,7 +222,7 @@ def robust_snapshot_download(

      # Get file list and sizes for progress tracking
      if progress_callback:
-         progress_callback("📋 Getting repository information...")
+         progress_callback("pulling manifest")

      file_sizes = get_repo_file_list(repo_id)
      total_size = sum(file_sizes.values())
@@ -140,20 +230,65 @@ def robust_snapshot_download(
      if progress_callback and file_sizes:
          progress_callback(f"📦 Repository: {len(file_sizes)} files, {format_size(total_size)} total")

+     # Initialize enhanced progress tracker
+     progress_tracker = EnhancedProgressTracker(len(file_sizes), progress_callback)
+     progress_tracker.set_total_size(total_size)
+
      # Check what's already downloaded
      local_path = Path(local_dir)
+     existing_size = 0
      if local_path.exists() and not force_download:
          existing_files = []
-         existing_size = 0
          for file_path in local_path.rglob('*'):
              if file_path.is_file():
                  rel_path = file_path.relative_to(local_path)
                  existing_files.append(str(rel_path))
-                 existing_size += file_path.stat().st_size
+                 file_size = file_path.stat().st_size
+                 existing_size += file_size
+                 # Mark existing files as completed in progress tracker
+                 progress_tracker.file_progress[str(rel_path)] = (file_size, file_size)
+                 progress_tracker.downloaded_size += file_size
+                 progress_tracker.completed_files += 1

      if progress_callback and existing_files:
          progress_callback(f"📁 Found {len(existing_files)} existing files ({format_size(existing_size)})")

+     # Custom tqdm class to capture HuggingFace download progress
+     class OllamaStyleTqdm(tqdm):
+         def __init__(self, *args, **kwargs):
+             # Extract description to get filename
+             desc = kwargs.get('desc', '')
+             self.current_filename = desc
+
+             # Get file size from our pre-fetched data
+             file_size = file_sizes.get(self.current_filename, 0)
+             if file_size > 0:
+                 kwargs['total'] = file_size
+
+             super().__init__(*args, **kwargs)
+
+             # Start tracking this file
+             if self.current_filename and progress_callback:
+                 progress_tracker.start_file(self.current_filename, file_size)
+
+         def update(self, n=1):
+             super().update(n)
+
+             # Update our progress tracker
+             if self.current_filename and progress_callback:
+                 downloaded = getattr(self, 'n', 0)
+                 total = getattr(self, 'total', 0) or file_sizes.get(self.current_filename, 0)
+
+                 if total > 0:
+                     progress_tracker.update_file_progress(self.current_filename, downloaded, total)
+
+         def close(self):
+             super().close()
+
+             # Mark file as completed
+             if self.current_filename and progress_callback:
+                 progress_tracker.complete_file(self.current_filename)
+
      last_exception = None

      for attempt in range(max_retries):
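`snapshot_download` accepts a `tqdm_class` argument, which is the hook the nested `OllamaStyleTqdm` above rides on. A stripped-down illustration of the same hook (the call is commented out to avoid triggering a real download; the repo id is a hypothetical example):

```python
from huggingface_hub import snapshot_download
from tqdm import tqdm

class PrintingTqdm(tqdm):
    """Forward raw tqdm updates to stdout (illustration only)."""
    def update(self, n=1):
        super().update(n)
        if self.total:
            print(f"{self.desc}: {self.n}/{self.total} bytes")

# snapshot_download(repo_id="runwayml/stable-diffusion-v1-5",
#                   tqdm_class=PrintingTqdm)
```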
@@ -166,12 +301,6 @@ def robust_snapshot_download(

          logger.info(f"Download attempt {attempt + 1}/{max_retries} with {workers} workers")

-         # Create a custom progress callback for tqdm
-         def tqdm_callback(t):
-             def inner(chunk_size):
-                 t.update(chunk_size)
-             return inner
-
          result = snapshot_download(
              repo_id=repo_id,
              local_dir=local_dir,
@@ -181,7 +310,7 @@
              resume_download=True,  # Enable resume
              etag_timeout=300 + (attempt * 60),  # Increase timeout on retries
              force_download=force_download,
-             tqdm_class=tqdm if progress_callback else None
+             tqdm_class=OllamaStyleTqdm if progress_callback else None
          )

          if progress_callback:
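Putting it together, a hedged call sketch for `robust_snapshot_download` (parameter names are taken from the hunks above; the full signature is not shown in this diff, and the repo id is a hypothetical example):

```python
from ollamadiffuser.core.utils.download_utils import robust_snapshot_download

local_path = robust_snapshot_download(
    repo_id="runwayml/stable-diffusion-v1-5",  # hypothetical example repo
    local_dir="./models/sd15",
    progress_callback=print,  # receives "pulling manifest", per-file lines, ✅/❌ messages
)
```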
ollamadiffuser-1.1.2.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ollamadiffuser
- Version: 1.1.0
+ Version: 1.1.2
  Summary: 🎨 Local AI Image Generation with Ollama-style CLI for Stable Diffusion, FLUX.1, and LoRA support
  Home-page: https://github.com/ollamadiffuser/ollamadiffuser
  Author: OllamaDiffuser Team
@@ -70,6 +70,7 @@ Dynamic: requires-python
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
  [![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)

+
  ## Local AI Image Generation with OllamaDiffuser

  **OllamaDiffuser** simplifies local deployment of **Stable Diffusion**, **FLUX.1**, and other AI image generation models. An intuitive **local SD** tool inspired by **Ollama's** simplicity - perfect for **local diffuser** workflows with CLI, web UI, and LoRA support.
@@ -109,7 +110,7 @@ curl -X POST http://localhost:8000/api/generate \
  ### Option 2: Development Installation
  ```bash
  # Clone the repository
- git clone https://github.com/yourusername/ollamadiffuser.git
+ git clone https://github.com/ollamadiffuser/ollamadiffuser.git
  cd ollamadiffuser

  # Install dependencies
@@ -118,17 +119,25 @@ pip install -e .

  ### Basic Usage
  ```bash
+ # Check version
+ ollamadiffuser -V
+
  # Install a model
  ollamadiffuser pull stable-diffusion-1.5

- # Load the model
- ollamadiffuser load stable-diffusion-1.5
+ # Run the model (loads and starts API server)
+ ollamadiffuser run stable-diffusion-1.5

- # Generate an image
- ollamadiffuser generate "a beautiful sunset over mountains"
+ # Generate an image via API
+ curl -X POST http://localhost:8000/api/generate \
+   -H "Content-Type: application/json" \
+   -d '{"prompt": "a beautiful sunset over mountains"}' \
+   --output image.png

  # Start web interface
  ollamadiffuser --mode ui
+
+ open http://localhost:8001
  ```
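The curl call above maps one-to-one onto Python. A hedged sketch using `requests` (the endpoint returns the image bytes directly, per the `--output image.png` usage):

```python
import requests

resp = requests.post(
    "http://localhost:8000/api/generate",
    json={"prompt": "a beautiful sunset over mountains"},
    timeout=600,  # generation can take a while on CPU
)
resp.raise_for_status()
with open("image.png", "wb") as f:
    f.write(resp.content)  # response body is the generated image
```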

  ### ControlNet Quick Start
@@ -136,8 +145,8 @@ ollamadiffuser --mode ui
  # Install ControlNet model
  ollamadiffuser pull controlnet-canny-sd15

- # Load ControlNet model
- ollamadiffuser load controlnet-canny-sd15
+ # Run ControlNet model (loads and starts API server)
+ ollamadiffuser run controlnet-canny-sd15

  # Generate with control image
  curl -X POST http://localhost:8000/api/generate/controlnet \
@@ -219,21 +228,29 @@ ollamadiffuser lora unload

  ### Command Line Interface
  ```bash
- # Generate with advanced parameters
- ollamadiffuser generate \
-   "a futuristic cityscape" \
-   --negative-prompt "blurry, low quality" \
-   --steps 30 \
-   --guidance 7.5 \
-   --width 1024 \
-   --height 1024
+ # Pull and run a model
+ ollamadiffuser pull stable-diffusion-1.5
+ ollamadiffuser run stable-diffusion-1.5
+
+ # In another terminal, generate images via API
+ curl -X POST http://localhost:8000/api/generate \
+   -H "Content-Type: application/json" \
+   -d '{
+     "prompt": "a futuristic cityscape",
+     "negative_prompt": "blurry, low quality",
+     "num_inference_steps": 30,
+     "guidance_scale": 7.5,
+     "width": 1024,
+     "height": 1024
+   }' \
+   --output image.png
  ```

  ### Web UI
  ```bash
  # Start web interface
  ollamadiffuser --mode ui
- # Open http://localhost:8001
+ # Open http://localhost:8001 in your browser
  ```

  Features:
@@ -247,10 +264,15 @@ Features:
  # Start API server
  ollamadiffuser --mode api

+ ollamadiffuser load stable-diffusion-1.5  # load a model (in another terminal)
+
  # Generate image
  curl -X POST http://localhost:8000/api/generate \
    -H "Content-Type: application/json" \
    -d '{"prompt": "a beautiful landscape", "width": 1024, "height": 1024}'
+
+ # Interactive API docs:
+ # http://localhost:8000/docs
  ```

  ### Python API
@@ -258,16 +280,19 @@ curl -X POST http://localhost:8000/api/generate \
  from ollamadiffuser.core.models.manager import model_manager

  # Load model
- model_manager.load_model("stable-diffusion-1.5")
- engine = model_manager.loaded_model
-
- # Generate image
- image = engine.generate_image(
-     prompt="a beautiful sunset",
-     width=1024,
-     height=1024
- )
- image.save("output.jpg")
+ success = model_manager.load_model("stable-diffusion-1.5")
+ if success:
+     engine = model_manager.loaded_model
+
+     # Generate image
+     image = engine.generate_image(
+         prompt="a beautiful sunset",
+         width=1024,
+         height=1024
+     )
+     image.save("output.jpg")
+ else:
+     print("Failed to load model")
  ```

  ## 📦 Supported Models
@@ -297,18 +322,6 @@ Models are automatically configured with optimal settings:
  - **Precision Handling**: FP16/BF16 support for efficiency
  - **Safety Features**: NSFW filter bypass for creative freedom

- ### Performance Tuning
- ```bash
- # Enable verbose logging
- ollamadiffuser -v generate "test prompt"
-
- # Check system status
- ollamadiffuser status
-
- # Monitor memory usage
- ollamadiffuser info
- ```
-
  ## 🔧 Advanced Usage

  ### ControlNet Parameters
@@ -331,8 +344,9 @@ from ollamadiffuser.core.utils.controlnet_preprocessors import controlnet_prepro
  controlnet_preprocessor.initialize()

  # Process multiple images
- for image_path in image_list:
-     control_img = controlnet_preprocessor.preprocess(image, "canny")
+ prompt = "beautiful landscape"  # Define the prompt
+ for i, image_path in enumerate(image_list):
+     control_img = controlnet_preprocessor.preprocess(image_path, "canny")
      result = engine.generate_image(prompt, control_image=control_img)
      result.save(f"output_{i}.jpg")
  ```
@@ -360,8 +374,6 @@ with open("control.jpg", "rb") as f:
  ## 📚 Documentation & Guides

  - **[ControlNet Guide](CONTROLNET_GUIDE.md)**: Comprehensive ControlNet usage and examples
- - **[LoRA Guide](LORA_GUIDE.md)**: LoRA management and best practices
- - **[API Reference](API_REFERENCE.md)**: Complete API documentation
  - **[Website Documentation](https://www.ollamadiffuser.com/)**: Complete tutorials and guides

  ## 🚀 Performance & Hardware
@@ -383,6 +395,43 @@ with open("control.jpg", "rb") as f:

  ## 🔧 Troubleshooting

+ ### Installation Issues
+
+ #### Missing Dependencies (cv2/OpenCV Error)
+ If you encounter `ModuleNotFoundError: No module named 'cv2'`, run:
+
+ ```bash
+ # Quick fix (quote the spec so the shell doesn't treat '>' as a redirect)
+ pip install "opencv-python>=4.8.0"
+
+ # Or use the built-in verification tool
+ ollamadiffuser verify-deps
+
+ # Or install with all optional dependencies
+ pip install "ollamadiffuser[full]"
+ ```
+
+ #### Complete Dependency Check
+ ```bash
+ # Run comprehensive system diagnostics
+ ollamadiffuser doctor
+
+ # Verify and install missing dependencies interactively
+ ollamadiffuser verify-deps
+ ```
+
+ #### Clean Installation
+ If you're having persistent issues:
+
+ ```bash
+ # Uninstall and reinstall
+ pip uninstall ollamadiffuser
+ pip install --no-cache-dir "ollamadiffuser[full]"
+
+ # Verify installation
+ ollamadiffuser verify-deps
+ ```
+
  ### Common Issues

  #### Slow Startup
@@ -407,46 +456,49 @@ curl -X POST http://localhost:8000/api/controlnet/initialize

  #### Memory Issues
  ```bash
- # Use smaller image sizes
- ollamadiffuser generate "test" --width 512 --height 512
+ # Use smaller image sizes via API
+ curl -X POST http://localhost:8000/api/generate \
+   -H "Content-Type: application/json" \
+   -d '{"prompt": "test", "width": 512, "height": 512}' \
+   --output test.png

- # Enable CPU offloading (automatic)
- # Close other applications
+ # CPU offloading is automatic
+ # Close other applications to free memory
  # Use basic preprocessors instead of advanced ones
  ```

- ### Debug Mode
- ```bash
- # Enable verbose logging
- ollamadiffuser -v run model-name
+ ### Platform-Specific Issues

- # Check system information
- ollamadiffuser info
-
- # Validate installation
- ollamadiffuser doctor
+ #### macOS Apple Silicon
+ ```bash
+ # If you encounter OpenCV issues on Apple Silicon
+ pip uninstall opencv-python
+ pip install "opencv-python-headless>=4.8.0"
  ```

- ## 🤝 Contributing
-
- We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+ #### Windows
+ ```bash
+ # If you encounter build errors
+ pip install --only-binary=all "opencv-python>=4.8.0"
+ ```

- ### Development Setup
+ #### Linux
  ```bash
- # Clone repository
- git clone https://github.com/yourusername/ollamadiffuser.git
- cd ollamadiffuser
+ # If you need system dependencies
+ sudo apt-get update
+ sudo apt-get install libgl1-mesa-glx libglib2.0-0
+ pip install "opencv-python>=4.8.0"
+ ```

- # Install in development mode
- pip install -e ".[dev]"
+ ### Debug Mode
+ ```bash
+ # Enable verbose logging
+ ollamadiffuser --verbose run model-name
+ ```

- # Run tests
- pytest tests/
+ ## 🤝 Contributing

- # Run linting
- flake8 ollamadiffuser/
- black ollamadiffuser/
- ```
+ We welcome contributions! Please check the GitHub repository for contribution guidelines.

  ## 🤝 Community & Support
@@ -476,9 +528,8 @@ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file

  ## 📞 Support

- - **Documentation**: [Full documentation](docs/)
- - **Issues**: [GitHub Issues](https://github.com/yourusername/ollamadiffuser/issues)
- - **Discussions**: [GitHub Discussions](https://github.com/yourusername/ollamadiffuser/discussions)
+ - **Issues**: [GitHub Issues](https://github.com/ollamadiffuser/ollamadiffuser/issues)
+ - **Discussions**: [GitHub Discussions](https://github.com/ollamadiffuser/ollamadiffuser/discussions)

  ---
ollamadiffuser-1.1.2.dist-info/RECORD
@@ -1,9 +1,10 @@
- ollamadiffuser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ollamadiffuser/__main__.py,sha256=-3WVAex0P3PgfIUtOhVof9nF4Zor8mvMNTK5nKFlrHU,1841
+ ollamadiffuser/__init__.py,sha256=g3eMrHnQLoWpqIII85gv5ScYrmImGjtRIoM8-qEK7o8,1127
+ ollamadiffuser/__main__.py,sha256=tNWMvEHq4ddtKLp7DrhIoOdnFw3F8RNrETC_u5xpkFI,141
  ollamadiffuser/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ollamadiffuser/api/server.py,sha256=4-3gT8W1404bxvJ7y9htvKbd2yxrrbtAUvT7shOlJss,17679
  ollamadiffuser/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ollamadiffuser/cli/main.py,sha256=V0UEEGVBeMJ_I0tem1YJWCyX4yO6GjqEBUMH14U5Hm0,37045
+ ollamadiffuser/cli/commands.py,sha256=qEYCAt07O37kAPBNlhYwyS9rEZWw-rw5UgtiqWoZGdo,4715
+ ollamadiffuser/cli/main.py,sha256=f1jopRvZsfgQ49gvb-iTMCP_XCmHpuSnTUrYqB4Mp5w,41481
  ollamadiffuser/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ollamadiffuser/core/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ollamadiffuser/core/config/settings.py,sha256=VhI1vLGmOAQ7-XtyHrT5KoMpcGeGt-Mij-9NxX_ZKsI,4881
@@ -12,16 +13,16 @@ ollamadiffuser/core/inference/engine.py,sha256=ky76lAjWexlrgmHSZZILa3FPQP7xx2WQ0
  ollamadiffuser/core/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ollamadiffuser/core/models/manager.py,sha256=vO1Az_aO5lZKMgSyK_6j2wT5nzPMowZgKhcH2mQVLkI,24139
  ollamadiffuser/core/utils/__init__.py,sha256=ZdXZWX1hfDnnV6OmRD6UStNljDJIQ892da2CtC-zdDw,31
- ollamadiffuser/core/utils/controlnet_preprocessors.py,sha256=JcLxvnuYAJdQb9EM1mAgyye1nARHwyjFdWzyl7yh7So,12684
- ollamadiffuser/core/utils/download_utils.py,sha256=LSsTMSpFS39KSnZwufTw1Z1eu3w7p-BbrTgrWHzudcs,14806
+ ollamadiffuser/core/utils/controlnet_preprocessors.py,sha256=v21X_Bk-a4gKbUZUKoeP2W8TSGlv-ST8IYNsn3NrZ2c,15446
+ ollamadiffuser/core/utils/download_utils.py,sha256=DvCt-cjH6WSBJniJT112b4a9AUzlwOYhQtPuEfISmtM,20961
  ollamadiffuser/core/utils/lora_manager.py,sha256=SrZydPSGJqCS_Vek35bEdG2Q51qCOLZmPvnNzUjjIN0,14328
  ollamadiffuser/ui/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ollamadiffuser/ui/web.py,sha256=xZ5Ja47B-51LRyadfC-gW_aE_B3D571RgpQX0RDzVxM,15290
  ollamadiffuser/ui/templates/index.html,sha256=qTQVFxiTbeZ90O-iNqWC_4pYP6yyIs2z6U69VJPqAB4,38176
  ollamadiffuser/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ollamadiffuser-1.1.0.dist-info/licenses/LICENSE,sha256=cnGL9l2P510Uk3TCnv62kot6vAfdSawhOZh7Y-oYoIE,1071
- ollamadiffuser-1.1.0.dist-info/METADATA,sha256=vLK3Xz7s81u4jcPgWZCdP_oAUUKg1EuDAPbRYm6ygnE,15278
- ollamadiffuser-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ollamadiffuser-1.1.0.dist-info/entry_points.txt,sha256=tHXXO3N0GSnIobDe_eSOLfHPjjVFjeTg2Fd-APoD6sY,64
- ollamadiffuser-1.1.0.dist-info/top_level.txt,sha256=97wOGgTCxDE765Nr_o7B4Kwr_M_jy8fCCeQ81sMKlC4,15
- ollamadiffuser-1.1.0.dist-info/RECORD,,
+ ollamadiffuser-1.1.2.dist-info/licenses/LICENSE,sha256=cnGL9l2P510Uk3TCnv62kot6vAfdSawhOZh7Y-oYoIE,1071
+ ollamadiffuser-1.1.2.dist-info/METADATA,sha256=CQ_rn3y4ktSe8wqTCPBbtoNf6Wz0xHu_rPTq1m5l3EQ,16632
+ ollamadiffuser-1.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ollamadiffuser-1.1.2.dist-info/entry_points.txt,sha256=tHXXO3N0GSnIobDe_eSOLfHPjjVFjeTg2Fd-APoD6sY,64
+ ollamadiffuser-1.1.2.dist-info/top_level.txt,sha256=97wOGgTCxDE765Nr_o7B4Kwr_M_jy8fCCeQ81sMKlC4,15
+ ollamadiffuser-1.1.2.dist-info/RECORD,,