ollamadiffuser 1.2.2__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. ollamadiffuser/__init__.py +1 -1
  2. ollamadiffuser/api/server.py +312 -312
  3. ollamadiffuser/cli/config_commands.py +119 -0
  4. ollamadiffuser/cli/lora_commands.py +169 -0
  5. ollamadiffuser/cli/main.py +85 -1233
  6. ollamadiffuser/cli/model_commands.py +664 -0
  7. ollamadiffuser/cli/recommend_command.py +205 -0
  8. ollamadiffuser/cli/registry_commands.py +197 -0
  9. ollamadiffuser/core/config/model_registry.py +562 -11
  10. ollamadiffuser/core/config/settings.py +24 -2
  11. ollamadiffuser/core/inference/__init__.py +5 -0
  12. ollamadiffuser/core/inference/base.py +182 -0
  13. ollamadiffuser/core/inference/engine.py +204 -1405
  14. ollamadiffuser/core/inference/strategies/__init__.py +1 -0
  15. ollamadiffuser/core/inference/strategies/controlnet_strategy.py +170 -0
  16. ollamadiffuser/core/inference/strategies/flux_strategy.py +136 -0
  17. ollamadiffuser/core/inference/strategies/generic_strategy.py +164 -0
  18. ollamadiffuser/core/inference/strategies/gguf_strategy.py +113 -0
  19. ollamadiffuser/core/inference/strategies/hidream_strategy.py +104 -0
  20. ollamadiffuser/core/inference/strategies/sd15_strategy.py +134 -0
  21. ollamadiffuser/core/inference/strategies/sd3_strategy.py +80 -0
  22. ollamadiffuser/core/inference/strategies/sdxl_strategy.py +131 -0
  23. ollamadiffuser/core/inference/strategies/video_strategy.py +108 -0
  24. ollamadiffuser/mcp/__init__.py +0 -0
  25. ollamadiffuser/mcp/server.py +184 -0
  26. ollamadiffuser/ui/templates/index.html +62 -1
  27. ollamadiffuser/ui/web.py +116 -54
  28. {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/METADATA +337 -108
  29. ollamadiffuser-2.0.0.dist-info/RECORD +61 -0
  30. {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/WHEEL +1 -1
  31. {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/entry_points.txt +1 -0
  32. ollamadiffuser/core/models/registry.py +0 -384
  33. ollamadiffuser/ui/samples/.DS_Store +0 -0
  34. ollamadiffuser-1.2.2.dist-info/RECORD +0 -45
  35. {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/licenses/LICENSE +0 -0
  36. {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,205 @@
1
+ """Hardware-aware model recommendation command."""
2
+
3
+ import click
4
+ from rich.console import Console
5
+ from rich.table import Table
6
+ from rich.panel import Panel
7
+
8
+ from ..core.config.model_registry import model_registry
9
+
10
+ console = Console()
11
+
12
+
13
+ def _detect_hardware():
14
+ """Detect hardware capabilities."""
15
+ hw = {
16
+ "device": "cpu",
17
+ "device_name": "CPU",
18
+ "total_ram_gb": 0,
19
+ "available_ram_gb": 0,
20
+ "vram_gb": 0,
21
+ }
22
+
23
+ # RAM detection
24
+ try:
25
+ import psutil
26
+
27
+ mem = psutil.virtual_memory()
28
+ hw["total_ram_gb"] = round(mem.total / (1024**3), 1)
29
+ hw["available_ram_gb"] = round(mem.available / (1024**3), 1)
30
+ except ImportError:
31
+ pass
32
+
33
+ # GPU detection
34
+ try:
35
+ import torch
36
+
37
+ if torch.cuda.is_available():
38
+ hw["device"] = "cuda"
39
+ hw["device_name"] = torch.cuda.get_device_name(0)
40
+ hw["vram_gb"] = round(
41
+ torch.cuda.get_device_properties(0).total_mem / (1024**3), 1
42
+ )
43
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
44
+ hw["device"] = "mps"
45
+ hw["device_name"] = "Apple Silicon (MPS)"
46
+ # On MPS, VRAM is unified memory = total RAM
47
+ hw["vram_gb"] = hw["total_ram_gb"]
48
+ except ImportError:
49
+ pass
50
+
51
+ return hw
52
+
53
+
54
+ def _classify_model(model_name, model_info, hw):
55
+ """Classify a model as 'recommended', 'possible', 'too_large', or 'incompatible'.
56
+
57
+ Returns (tier, reason).
58
+ """
59
+ hr = model_info.get("hardware_requirements", {})
60
+ supported = [d.upper() for d in hr.get("supported_devices", [])]
61
+ device_upper = hw["device"].upper()
62
+
63
+ if device_upper not in supported:
64
+ return "incompatible", f"Not supported on {device_upper}"
65
+
66
+ # Determine effective memory budget
67
+ if hw["device"] == "mps":
68
+ # Unified memory: reserve ~4GB for OS/system
69
+ effective_mem = hw["total_ram_gb"] - 4
70
+ elif hw["device"] == "cuda":
71
+ effective_mem = hw["vram_gb"]
72
+ else:
73
+ effective_mem = hw["total_ram_gb"]
74
+
75
+ min_vram = hr.get("min_vram_gb", 0)
76
+ rec_vram = hr.get("recommended_vram_gb", min_vram)
77
+
78
+ if min_vram > effective_mem:
79
+ return "too_large", f"Needs {min_vram}GB, have ~{effective_mem:.0f}GB"
80
+
81
+ if rec_vram <= effective_mem:
82
+ return "recommended", f"Fits well ({min_vram}-{rec_vram}GB needed)"
83
+ else:
84
+ return "possible", f"Tight fit ({min_vram}GB min, {rec_vram}GB rec)"
85
+
86
+
87
def _make_table(title, style, entries):
    """Build a rich Table for a tier of models."""
    table = Table(title=title)
    # Column layout is fixed; only the VRAM column's style varies per tier.
    for header, col_style in (
        ("Model", "cyan"),
        ("Type", "yellow"),
        ("VRAM", style),
        ("License", "blue"),
        ("Notes", "dim"),
    ):
        table.add_column(header, style=col_style)

    for model_name, info, note in entries:
        reqs = info.get("hardware_requirements", {})
        vram_range = f"{reqs.get('min_vram_gb', '?')}-{reqs.get('recommended_vram_gb', '?')}GB"
        license_type = info.get("license_info", {}).get("type", "Unknown")
        table.add_row(
            model_name,
            info.get("model_type", "?"),
            vram_range,
            license_type,
            note,
        )
    return table
106
+
107
+
108
@click.command()
@click.option(
    "--device",
    type=click.Choice(["auto", "cuda", "mps", "cpu"]),
    default="auto",
    help="Target device (default: auto-detect)",
)
@click.option(
    "--all", "show_all", is_flag=True, help="Show all models including incompatible"
)
@click.option(
    "--commercial-only", is_flag=True, help="Only show commercially licensed models"
)
def recommend(device, show_all, commercial_only):
    """Recommend models for your hardware.

    Detects your GPU, memory, and suggests models that will fit.
    """
    hw = _detect_hardware()
    if device != "auto":
        # NOTE(review): a manual override replaces only hw["device"];
        # vram_gb and device_name keep their auto-detected values — confirm intended.
        hw["device"] = device

    # Hardware summary panel
    summary = [
        "[bold blue]Hardware Detection[/bold blue]",
        f"Device: {hw['device_name']}",
        f"Total RAM: {hw['total_ram_gb']} GB",
    ]
    if hw["device"] == "cuda":
        summary.append(f"VRAM: {hw['vram_gb']} GB")
    elif hw["device"] == "mps":
        summary.append(f"Unified Memory: {hw['total_ram_gb']} GB (MPS uses shared RAM)")
    console.print(Panel.fit("\n".join(summary)))

    # Bucket every registry model into a compatibility tier.
    tiers = {"recommended": [], "possible": [], "too_large": []}
    for name, info in sorted(model_registry.get_all_models().items()):
        if commercial_only and not info.get("license_info", {}).get("commercial_use", False):
            continue
        tier, reason = _classify_model(name, info, hw)
        if tier in tiers:
            tiers[tier].append((name, info, reason))

    recommended = tiers["recommended"]
    possible = tiers["possible"]
    too_large = tiers["too_large"]

    # Tier tables
    if recommended:
        console.print(_make_table("[green]Recommended Models[/green]", "green", recommended))
    if possible:
        console.print(_make_table("[yellow]Possible (tight fit)[/yellow]", "yellow", possible))

    if show_all and too_large:
        table = Table(title="[red]Too Large for Current Hardware[/red]")
        table.add_column("Model", style="cyan")
        table.add_column("Min VRAM", style="red")
        table.add_column("Reason", style="dim")
        for name, info, reason in too_large:
            hr = info.get("hardware_requirements", {})
            table.add_row(name, f"{hr.get('min_vram_gb', '?')} GB", reason)
        console.print(table)

    # Closing summary and quick-start suggestion
    if not recommended and not possible:
        console.print("[yellow]No compatible models found for your hardware.[/yellow]")
        console.print("[dim]Try --device cpu to see CPU-compatible models.[/dim]")
        return

    console.print(
        f"\n[green]Found {len(recommended)} recommended + "
        f"{len(possible)} possible models[/green]"
    )
    # Pick a good quick-start model: prefer standalone gen models over controlnet
    quick_start_priority = [
        "pixart-sigma", "sana-1.5", "flux.1-dev-gguf-q4ks",
        "dreamshaper", "stable-diffusion-1.5",
        "flux.1-schnell", "realvisxl-v4", "sdxl-turbo",
    ]
    rec_names = {name for name, _, _ in recommended}
    top = next((c for c in quick_start_priority if c in rec_names), None)
    if top is None and recommended:
        top = recommended[0][0]
    if top:
        console.print(f"\n[bold]Quick start:[/bold] ollamadiffuser pull {top}")
@@ -0,0 +1,197 @@
1
+ """Model registry CLI commands"""
2
+
3
+ import sys
4
+ from typing import Optional
5
+
6
+ import click
7
+ from rich.console import Console
8
+ from rich.table import Table
9
+ from rich import print as rprint
10
+
11
+ from ..core.models.manager import model_manager
12
+ from ..core.config.settings import settings
13
+ from ..core.config.model_registry import model_registry
14
+
15
+ console = Console()
16
+
17
+
18
# Click command group: all `ollamadiffuser registry ...` subcommands attach here
# via @registry.command(); the docstring below doubles as the CLI help text.
@click.group()
def registry():
    """Manage model registry and show available models"""
    pass
22
+
23
+
24
@registry.command()
@click.option("--format", "-f", type=click.Choice(["table", "json", "yaml"]), default="table")
@click.option("--installed-only", is_flag=True)
@click.option("--available-only", is_flag=True)
@click.option("--external-only", is_flag=True)
def list(format: str, installed_only: bool, available_only: bool, external_only: bool):
    """List models in the registry"""
    # Resolve which slice of the registry to show. Flags are checked in
    # precedence order: installed > available > external > all.
    if installed_only:
        models, title = model_registry.get_installed_models(), "Installed Models"
    elif available_only:
        models, title = model_registry.get_available_models(), "Available Models"
    elif external_only:
        models, title = model_registry.get_external_api_models_only(), "External API Models"
    else:
        models, title = model_registry.get_all_models(), "All Models"

    installed_names = set(model_registry.get_installed_models().keys())
    current = model_manager.get_current_model()

    if not models:
        rprint(f"[yellow]No models found: {title}[/yellow]")
        return

    if format == "table":
        table = Table(title=title)
        for header, col_style in (
            ("Model", "cyan"),
            ("Type", "yellow"),
            ("Repository", "blue"),
            ("Status", "green"),
        ):
            table.add_column(header, style=col_style)

        for name, info in models.items():
            status = "Installed" if name in installed_names else "Available"
            if name == current:
                status += " (current)"
            table.add_row(name, info.get("model_type", "?"), info.get("repo_id", "?"), status)

        console.print(table)
        console.print(f"\n[dim]Total: {len(models)} | Installed: {len(installed_names)}[/dim]")
    elif format == "json":
        import json
        print(json.dumps(models, indent=2, ensure_ascii=False))
    elif format == "yaml":
        import yaml
        print(yaml.dump(models, default_flow_style=False, allow_unicode=True))
73
+
74
+
75
@registry.command()
@click.argument("model_name")
@click.argument("repo_id")
@click.argument("model_type")
@click.option("--variant", help="Model variant")
@click.option("--save", is_flag=True, help="Save to config file")
def add(model_name: str, repo_id: str, model_type: str, variant: Optional[str], save: bool):
    """Add a model to the registry"""
    config = {"repo_id": repo_id, "model_type": model_type}
    if variant:
        config["variant"] = variant

    if not model_registry.add_model(model_name, config):
        rprint(f"[red]Failed to add '{model_name}'![/red]")
        sys.exit(1)

    rprint(f"[green]Model '{model_name}' added![/green]")
    if save:
        import json

        # Merge the new entry into the existing user config (if any), then persist.
        config_path = settings.config_dir / "models.json"
        user_models = {}
        if config_path.exists():
            # FIX: read with explicit UTF-8, consistent with the
            # import-config/export commands in this module.
            with open(config_path, encoding="utf-8") as f:
                data = json.load(f)
            user_models = data.get("models", {})
        user_models[model_name] = config
        model_registry.save_user_config(user_models, config_path)
        rprint(f"[green]Saved to {config_path}[/green]")
103
+
104
+
105
@registry.command()
@click.argument("model_name")
def remove(model_name: str):
    """Remove a model from the registry"""
    # Exit non-zero when the model does not exist so scripts can detect failure.
    if not model_registry.remove_model(model_name):
        rprint(f"[red]Model '{model_name}' not found![/red]")
        sys.exit(1)
    rprint(f"[green]Model '{model_name}' removed![/green]")
114
+
115
+
116
@registry.command()
def reload():
    """Reload the registry from config files"""
    model_registry.reload()
    count = len(model_registry.get_all_models())
    rprint(f"[green]Registry reloaded: {count} models[/green]")
122
+
123
+
124
@registry.command("import-config")
@click.argument("config_file", type=click.Path(exists=True))
def import_config(config_file: str):
    """Import models from a config file"""
    from pathlib import Path
    import json
    import yaml

    path = Path(config_file)
    with open(path, encoding="utf-8") as f:
        if path.suffix == ".json":
            data = json.load(f)
        elif path.suffix in (".yaml", ".yml"):
            data = yaml.safe_load(f)
        else:
            rprint(f"[red]Unsupported format: {path.suffix}[/red]")
            sys.exit(1)

    # FIX: yaml.safe_load returns None for an empty file (and either parser may
    # yield a non-dict top level); guard before the `in` test so the user gets
    # this error message instead of a TypeError traceback.
    if not isinstance(data, dict) or "models" not in data:
        rprint("[red]Config must contain 'models' section[/red]")
        sys.exit(1)

    count = 0
    for name, cfg in data["models"].items():
        if model_registry.add_model(name, cfg):
            count += 1
    rprint(f"[green]Imported {count} models[/green]")
151
+
152
+
153
@registry.command()
@click.option("--output", "-o", help="Output file path")
@click.option("--format", "-f", type=click.Choice(["json", "yaml"]), default="json")
def export(output: Optional[str], format: str):
    """Export registry to a file"""
    from pathlib import Path
    import json
    import yaml

    models = model_registry.get_all_models()
    payload = {"models": models}

    # Default destination is ./models.<format> when no -o is given.
    destination = Path(output) if output else Path(f"models.{format}")
    with open(destination, "w", encoding="utf-8") as f:
        if format == "json":
            json.dump(payload, f, indent=2, ensure_ascii=False)
        else:
            yaml.safe_dump(payload, f, default_flow_style=False, allow_unicode=True)

    rprint(f"[green]Exported {len(models)} models to {destination}[/green]")
173
+
174
+
175
@registry.command("check-gguf")
def check_gguf():
    """Check GGUF support status"""
    from ..core.models.gguf_loader import GGUF_AVAILABLE

    # Early exit with install hint when the optional GGUF extra is missing.
    if not GGUF_AVAILABLE:
        rprint("[red]GGUF Support Not Available[/red]")
        rprint("Install with: pip install 'ollamadiffuser[gguf]'")
        return

    rprint("[green]GGUF Support Available[/green]")
    gguf_models = {
        name: info
        for name, info in model_registry.get_all_models().items()
        if model_manager.is_gguf_model(name)
    }
    if not gguf_models:
        return

    table = Table()
    table.add_column("Model", style="cyan")
    table.add_column("Variant", style="yellow")
    table.add_column("VRAM", style="green")
    table.add_column("Installed")
    for name, info in gguf_models.items():
        reqs = info.get("hardware_requirements", {})
        installed = "Yes" if model_manager.is_model_installed(name) else "No"
        table.add_row(name, info.get("variant", "?"), f"{reqs.get('min_vram_gb', '?')}GB", installed)
    console.print(table)