ollamadiffuser 1.1.6__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,757 @@
1
+ """
2
+ Model Registry Configuration
3
+
4
+ This file contains the default model definitions and provides a system
5
+ for managing models externally without hardcoding them in the manager.
6
+ """
7
+
8
+ import os
9
+ import json
10
+ import yaml
11
+ import requests
12
+ from pathlib import Path
13
+ from typing import Dict, Any, List, Optional
14
+ from .settings import settings
15
+
16
+ class ModelRegistry:
17
+ """Dynamic model registry that supports external model definitions"""
18
+
19
    def __init__(self):
        """Build the registry: defaults first, then user config files, then the remote API."""
        # Locally defined models (hardcoded defaults + user config files).
        self._registry: Dict[str, Dict[str, Any]] = {}
        # Paths of external config files that were successfully loaded.
        self._external_registries: List[str] = []
        # Cache of models fetched from the remote ollamadiffuser API.
        self._external_api_models: Dict[str, Dict[str, Any]] = {}
        # Order matters: later sources can only be overridden by _registry,
        # which _get_combined_models applies last.
        self._load_default_models()
        self._load_external_models()
        # Load external API models on initialization
        self._refresh_external_api_models()
27
+
28
+ def _get_model_manager(self):
29
+ """Get model manager instance (lazy import to avoid circular imports)"""
30
+ try:
31
+ from ..models.manager import ModelManager
32
+ if not hasattr(self, '_model_manager'):
33
+ self._model_manager = ModelManager()
34
+ return self._model_manager
35
+ except ImportError:
36
+ return None
37
+
38
+ def _load_default_models(self):
39
+ """Load default hardcoded models for backward compatibility"""
40
+ self._registry = {
41
+ "flux.1-dev": {
42
+ "repo_id": "black-forest-labs/FLUX.1-dev",
43
+ "model_type": "flux",
44
+ "variant": "fp16",
45
+ "parameters": {
46
+ "num_inference_steps": 20,
47
+ "guidance_scale": 3.0,
48
+ "max_sequence_length": 512
49
+ },
50
+ "hardware_requirements": {
51
+ "min_vram_gb": 20,
52
+ "recommended_vram_gb": 24,
53
+ "min_ram_gb": 32,
54
+ "recommended_ram_gb": 64,
55
+ "disk_space_gb": 24,
56
+ "supported_devices": ["CUDA"],
57
+ "performance_notes": "Requires significant VRAM. Consider using smaller variants for lower-end hardware."
58
+ },
59
+ "license_info": {
60
+ "type": "FLUX.1 Non-Commercial License",
61
+ "requires_agreement": True,
62
+ "commercial_use": False
63
+ }
64
+ },
65
+
66
+ "flux.1-schnell": {
67
+ "repo_id": "black-forest-labs/FLUX.1-schnell",
68
+ "model_type": "flux",
69
+ "variant": "fp16",
70
+ "parameters": {
71
+ "num_inference_steps": 4,
72
+ "guidance_scale": 1.0,
73
+ "max_sequence_length": 256
74
+ },
75
+ "hardware_requirements": {
76
+ "min_vram_gb": 16,
77
+ "recommended_vram_gb": 20,
78
+ "min_ram_gb": 24,
79
+ "recommended_ram_gb": 32,
80
+ "disk_space_gb": 24,
81
+ "supported_devices": ["CUDA", "MPS"],
82
+ "performance_notes": "Faster variant with fewer steps. Good balance of speed and quality."
83
+ },
84
+ "license_info": {
85
+ "type": "Apache 2.0",
86
+ "requires_agreement": False,
87
+ "commercial_use": True
88
+ }
89
+ },
90
+
91
+ "flux.1-dev-gguf-q2k": {
92
+ "repo_id": "city96/FLUX.1-dev-gguf",
93
+ "model_type": "flux",
94
+ "variant": "gguf-q2k",
95
+ "parameters": {
96
+ "num_inference_steps": 28,
97
+ "guidance_scale": 1.0,
98
+ "max_sequence_length": 512
99
+ },
100
+ "hardware_requirements": {
101
+ "min_vram_gb": 3,
102
+ "recommended_vram_gb": 6,
103
+ "min_ram_gb": 8,
104
+ "recommended_ram_gb": 16,
105
+ "disk_space_gb": 3,
106
+ "supported_devices": ["CUDA", "CPU"],
107
+ "performance_notes": "Heavily quantized, lowest quality but very fast"
108
+ },
109
+ "license_info": {
110
+ "type": "FLUX.1 Non-Commercial License",
111
+ "requires_agreement": True,
112
+ "commercial_use": False
113
+ }
114
+ },
115
+
116
+ "flux.1-dev-gguf-q3ks": {
117
+ "repo_id": "city96/FLUX.1-dev-gguf",
118
+ "model_type": "flux",
119
+ "variant": "gguf-q3ks",
120
+ "parameters": {
121
+ "num_inference_steps": 28,
122
+ "guidance_scale": 1.0,
123
+ "max_sequence_length": 512
124
+ },
125
+ "hardware_requirements": {
126
+ "min_vram_gb": 4,
127
+ "recommended_vram_gb": 8,
128
+ "min_ram_gb": 10,
129
+ "recommended_ram_gb": 16,
130
+ "disk_space_gb": 4,
131
+ "supported_devices": ["CUDA", "CPU"],
132
+ "performance_notes": "Light quantization, good speed/quality balance"
133
+ },
134
+ "license_info": {
135
+ "type": "FLUX.1 Non-Commercial License",
136
+ "requires_agreement": True,
137
+ "commercial_use": False
138
+ }
139
+ },
140
+
141
+ "flux.1-dev-gguf-q4ks": {
142
+ "repo_id": "city96/FLUX.1-dev-gguf",
143
+ "model_type": "flux",
144
+ "variant": "gguf-q4ks",
145
+ "parameters": {
146
+ "num_inference_steps": 28,
147
+ "guidance_scale": 1.0,
148
+ "max_sequence_length": 512
149
+ },
150
+ "hardware_requirements": {
151
+ "min_vram_gb": 6,
152
+ "recommended_vram_gb": 10,
153
+ "min_ram_gb": 12,
154
+ "recommended_ram_gb": 20,
155
+ "disk_space_gb": 6,
156
+ "supported_devices": ["CUDA", "CPU"],
157
+ "performance_notes": "Recommended quantization level - good quality and speed"
158
+ },
159
+ "license_info": {
160
+ "type": "FLUX.1 Non-Commercial License",
161
+ "requires_agreement": True,
162
+ "commercial_use": False
163
+ }
164
+ },
165
+
166
+ "flux.1-dev-gguf-q4-0": {
167
+ "repo_id": "city96/FLUX.1-dev-gguf",
168
+ "model_type": "flux",
169
+ "variant": "gguf-q4-0",
170
+ "parameters": {
171
+ "num_inference_steps": 28,
172
+ "guidance_scale": 1.0,
173
+ "max_sequence_length": 512
174
+ },
175
+ "hardware_requirements": {
176
+ "min_vram_gb": 6,
177
+ "recommended_vram_gb": 10,
178
+ "min_ram_gb": 12,
179
+ "recommended_ram_gb": 20,
180
+ "disk_space_gb": 6,
181
+ "supported_devices": ["CUDA", "CPU"],
182
+ "performance_notes": "Q4_0 quantization - fast inference with good quality"
183
+ },
184
+ "license_info": {
185
+ "type": "FLUX.1 Non-Commercial License",
186
+ "requires_agreement": True,
187
+ "commercial_use": False
188
+ }
189
+ },
190
+
191
+ "flux.1-dev-gguf-q4-1": {
192
+ "repo_id": "city96/FLUX.1-dev-gguf",
193
+ "model_type": "flux",
194
+ "variant": "gguf-q4-1",
195
+ "parameters": {
196
+ "num_inference_steps": 28,
197
+ "guidance_scale": 1.0,
198
+ "max_sequence_length": 512
199
+ },
200
+ "hardware_requirements": {
201
+ "min_vram_gb": 6,
202
+ "recommended_vram_gb": 10,
203
+ "min_ram_gb": 12,
204
+ "recommended_ram_gb": 20,
205
+ "disk_space_gb": 6,
206
+ "supported_devices": ["CUDA", "CPU"],
207
+ "performance_notes": "Q4_1 quantization - improved Q4_0 with better accuracy"
208
+ },
209
+ "license_info": {
210
+ "type": "FLUX.1 Non-Commercial License",
211
+ "requires_agreement": True,
212
+ "commercial_use": False
213
+ }
214
+ },
215
+
216
+ "flux.1-dev-gguf-q5ks": {
217
+ "repo_id": "city96/FLUX.1-dev-gguf",
218
+ "model_type": "flux",
219
+ "variant": "gguf-q5ks",
220
+ "parameters": {
221
+ "num_inference_steps": 28,
222
+ "guidance_scale": 1.0,
223
+ "max_sequence_length": 512
224
+ },
225
+ "hardware_requirements": {
226
+ "min_vram_gb": 8,
227
+ "recommended_vram_gb": 12,
228
+ "min_ram_gb": 16,
229
+ "recommended_ram_gb": 24,
230
+ "disk_space_gb": 8,
231
+ "supported_devices": ["CUDA", "CPU"],
232
+ "performance_notes": "Higher quality quantization, slower but better results"
233
+ },
234
+ "license_info": {
235
+ "type": "FLUX.1 Non-Commercial License",
236
+ "requires_agreement": True,
237
+ "commercial_use": False
238
+ }
239
+ },
240
+
241
+ "flux.1-dev-gguf-q5-0": {
242
+ "repo_id": "city96/FLUX.1-dev-gguf",
243
+ "model_type": "flux",
244
+ "variant": "gguf-q5-0",
245
+ "parameters": {
246
+ "num_inference_steps": 28,
247
+ "guidance_scale": 1.0,
248
+ "max_sequence_length": 512
249
+ },
250
+ "hardware_requirements": {
251
+ "min_vram_gb": 8,
252
+ "recommended_vram_gb": 12,
253
+ "min_ram_gb": 16,
254
+ "recommended_ram_gb": 24,
255
+ "disk_space_gb": 8,
256
+ "supported_devices": ["CUDA", "CPU"],
257
+ "performance_notes": "Q5_0 quantization - good balance of size and quality"
258
+ },
259
+ "license_info": {
260
+ "type": "FLUX.1 Non-Commercial License",
261
+ "requires_agreement": True,
262
+ "commercial_use": False
263
+ }
264
+ },
265
+
266
+ "flux.1-dev-gguf-q5-1": {
267
+ "repo_id": "city96/FLUX.1-dev-gguf",
268
+ "model_type": "flux",
269
+ "variant": "gguf-q5-1",
270
+ "parameters": {
271
+ "num_inference_steps": 28,
272
+ "guidance_scale": 1.0,
273
+ "max_sequence_length": 512
274
+ },
275
+ "hardware_requirements": {
276
+ "min_vram_gb": 8,
277
+ "recommended_vram_gb": 12,
278
+ "min_ram_gb": 16,
279
+ "recommended_ram_gb": 24,
280
+ "disk_space_gb": 8,
281
+ "supported_devices": ["CUDA", "CPU"],
282
+ "performance_notes": "Q5_1 quantization - improved Q5_0 with better accuracy"
283
+ },
284
+ "license_info": {
285
+ "type": "FLUX.1 Non-Commercial License",
286
+ "requires_agreement": True,
287
+ "commercial_use": False
288
+ }
289
+ },
290
+
291
+ "flux.1-dev-gguf-q6k": {
292
+ "repo_id": "city96/FLUX.1-dev-gguf",
293
+ "model_type": "flux",
294
+ "variant": "gguf-q6k",
295
+ "parameters": {
296
+ "num_inference_steps": 28,
297
+ "guidance_scale": 1.0,
298
+ "max_sequence_length": 512
299
+ },
300
+ "hardware_requirements": {
301
+ "min_vram_gb": 10,
302
+ "recommended_vram_gb": 16,
303
+ "min_ram_gb": 20,
304
+ "recommended_ram_gb": 32,
305
+ "disk_space_gb": 10,
306
+ "supported_devices": ["CUDA", "CPU"],
307
+ "performance_notes": "High quality quantization, close to original"
308
+ },
309
+ "license_info": {
310
+ "type": "FLUX.1 Non-Commercial License",
311
+ "requires_agreement": True,
312
+ "commercial_use": False
313
+ }
314
+ },
315
+
316
+ "flux.1-dev-gguf-q8": {
317
+ "repo_id": "city96/FLUX.1-dev-gguf",
318
+ "model_type": "flux",
319
+ "variant": "gguf-q8",
320
+ "parameters": {
321
+ "num_inference_steps": 28,
322
+ "guidance_scale": 1.0,
323
+ "max_sequence_length": 512
324
+ },
325
+ "hardware_requirements": {
326
+ "min_vram_gb": 12,
327
+ "recommended_vram_gb": 18,
328
+ "min_ram_gb": 24,
329
+ "recommended_ram_gb": 36,
330
+ "disk_space_gb": 12,
331
+ "supported_devices": ["CUDA", "CPU"],
332
+ "performance_notes": "Very high quality, minimal quantization loss"
333
+ },
334
+ "license_info": {
335
+ "type": "FLUX.1 Non-Commercial License",
336
+ "requires_agreement": True,
337
+ "commercial_use": False
338
+ }
339
+ },
340
+
341
+ "flux.1-dev-gguf-f16": {
342
+ "repo_id": "city96/FLUX.1-dev-gguf",
343
+ "model_type": "flux",
344
+ "variant": "gguf-f16",
345
+ "parameters": {
346
+ "num_inference_steps": 28,
347
+ "guidance_scale": 1.0,
348
+ "max_sequence_length": 512
349
+ },
350
+ "hardware_requirements": {
351
+ "min_vram_gb": 16,
352
+ "recommended_vram_gb": 24,
353
+ "min_ram_gb": 32,
354
+ "recommended_ram_gb": 48,
355
+ "disk_space_gb": 16,
356
+ "supported_devices": ["CUDA", "CPU"],
357
+ "performance_notes": "Full precision, best quality but largest size"
358
+ },
359
+ "license_info": {
360
+ "type": "FLUX.1 Non-Commercial License",
361
+ "requires_agreement": True,
362
+ "commercial_use": False
363
+ }
364
+ },
365
+
366
+ "stable-diffusion-3.5-medium": {
367
+ "repo_id": "stabilityai/stable-diffusion-3.5-medium",
368
+ "model_type": "sd3",
369
+ "variant": "fp16",
370
+ "parameters": {
371
+ "num_inference_steps": 28,
372
+ "guidance_scale": 3.5
373
+ },
374
+ "hardware_requirements": {
375
+ "min_vram_gb": 8,
376
+ "recommended_vram_gb": 12,
377
+ "min_ram_gb": 16,
378
+ "recommended_ram_gb": 32,
379
+ "disk_space_gb": 10,
380
+ "supported_devices": ["CUDA", "MPS", "CPU"],
381
+ "performance_notes": "Best on NVIDIA RTX 3080+ or Apple M2 Pro+"
382
+ }
383
+ },
384
+ "stable-diffusion-xl-base": {
385
+ "repo_id": "stabilityai/stable-diffusion-xl-base-1.0",
386
+ "model_type": "sdxl",
387
+ "variant": "fp16",
388
+ "parameters": {
389
+ "num_inference_steps": 50,
390
+ "guidance_scale": 7.5
391
+ },
392
+ "hardware_requirements": {
393
+ "min_vram_gb": 6,
394
+ "recommended_vram_gb": 10,
395
+ "min_ram_gb": 12,
396
+ "recommended_ram_gb": 24,
397
+ "disk_space_gb": 7,
398
+ "supported_devices": ["CUDA", "MPS", "CPU"],
399
+ "performance_notes": "Good on NVIDIA RTX 3070+ or Apple M1 Pro+"
400
+ }
401
+ },
402
+ "stable-diffusion-1.5": {
403
+ "repo_id": "runwayml/stable-diffusion-v1-5",
404
+ "model_type": "sd15",
405
+ "variant": "fp16",
406
+ "parameters": {
407
+ "num_inference_steps": 50,
408
+ "guidance_scale": 7.5
409
+ },
410
+ "hardware_requirements": {
411
+ "min_vram_gb": 4,
412
+ "recommended_vram_gb": 6,
413
+ "min_ram_gb": 8,
414
+ "recommended_ram_gb": 16,
415
+ "disk_space_gb": 5,
416
+ "supported_devices": ["CUDA", "MPS", "CPU"],
417
+ "performance_notes": "Runs well on most modern GPUs, including GTX 1060+"
418
+ }
419
+ },
420
+
421
+ # ControlNet models for SD 1.5
422
+ "controlnet-canny-sd15": {
423
+ "repo_id": "lllyasviel/sd-controlnet-canny",
424
+ "model_type": "controlnet_sd15",
425
+ "base_model": "stable-diffusion-1.5",
426
+ "controlnet_type": "canny",
427
+ "variant": "fp16",
428
+ "parameters": {
429
+ "num_inference_steps": 50,
430
+ "guidance_scale": 7.5,
431
+ "controlnet_conditioning_scale": 1.0
432
+ },
433
+ "hardware_requirements": {
434
+ "min_vram_gb": 6,
435
+ "recommended_vram_gb": 8,
436
+ "min_ram_gb": 12,
437
+ "recommended_ram_gb": 20,
438
+ "disk_space_gb": 7,
439
+ "supported_devices": ["CUDA", "MPS", "CPU"],
440
+ "performance_notes": "Requires base SD 1.5 model + ControlNet model. Good for edge detection."
441
+ }
442
+ },
443
+
444
+ "controlnet-depth-sd15": {
445
+ "repo_id": "lllyasviel/sd-controlnet-depth",
446
+ "model_type": "controlnet_sd15",
447
+ "base_model": "stable-diffusion-1.5",
448
+ "controlnet_type": "depth",
449
+ "variant": "fp16",
450
+ "parameters": {
451
+ "num_inference_steps": 50,
452
+ "guidance_scale": 7.5,
453
+ "controlnet_conditioning_scale": 1.0
454
+ },
455
+ "hardware_requirements": {
456
+ "min_vram_gb": 6,
457
+ "recommended_vram_gb": 8,
458
+ "min_ram_gb": 12,
459
+ "recommended_ram_gb": 20,
460
+ "disk_space_gb": 7,
461
+ "supported_devices": ["CUDA", "MPS", "CPU"],
462
+ "performance_notes": "Requires base SD 1.5 model + ControlNet model. Good for depth-based control."
463
+ }
464
+ },
465
+
466
+ "controlnet-openpose-sd15": {
467
+ "repo_id": "lllyasviel/sd-controlnet-openpose",
468
+ "model_type": "controlnet_sd15",
469
+ "base_model": "stable-diffusion-1.5",
470
+ "controlnet_type": "openpose",
471
+ "variant": "fp16",
472
+ "parameters": {
473
+ "num_inference_steps": 50,
474
+ "guidance_scale": 7.5,
475
+ "controlnet_conditioning_scale": 1.0
476
+ },
477
+ "hardware_requirements": {
478
+ "min_vram_gb": 6,
479
+ "recommended_vram_gb": 8,
480
+ "min_ram_gb": 12,
481
+ "recommended_ram_gb": 20,
482
+ "disk_space_gb": 7,
483
+ "supported_devices": ["CUDA", "MPS", "CPU"],
484
+ "performance_notes": "Requires base SD 1.5 model + ControlNet model. Good for pose control."
485
+ }
486
+ },
487
+
488
+ "controlnet-scribble-sd15": {
489
+ "repo_id": "lllyasviel/sd-controlnet-scribble",
490
+ "model_type": "controlnet_sd15",
491
+ "base_model": "stable-diffusion-1.5",
492
+ "controlnet_type": "scribble",
493
+ "variant": "fp16",
494
+ "parameters": {
495
+ "num_inference_steps": 50,
496
+ "guidance_scale": 7.5,
497
+ "controlnet_conditioning_scale": 1.0
498
+ },
499
+ "hardware_requirements": {
500
+ "min_vram_gb": 6,
501
+ "recommended_vram_gb": 8,
502
+ "min_ram_gb": 12,
503
+ "recommended_ram_gb": 20,
504
+ "disk_space_gb": 7,
505
+ "supported_devices": ["CUDA", "MPS", "CPU"],
506
+ "performance_notes": "Requires base SD 1.5 model + ControlNet model. Good for sketch-based control."
507
+ }
508
+ },
509
+
510
+ # ControlNet models for SDXL
511
+ "controlnet-canny-sdxl": {
512
+ "repo_id": "diffusers/controlnet-canny-sdxl-1.0",
513
+ "model_type": "controlnet_sdxl",
514
+ "base_model": "stable-diffusion-xl-base",
515
+ "controlnet_type": "canny",
516
+ "variant": "fp16",
517
+ "parameters": {
518
+ "num_inference_steps": 50,
519
+ "guidance_scale": 7.5,
520
+ "controlnet_conditioning_scale": 1.0
521
+ },
522
+ "hardware_requirements": {
523
+ "min_vram_gb": 8,
524
+ "recommended_vram_gb": 12,
525
+ "min_ram_gb": 16,
526
+ "recommended_ram_gb": 28,
527
+ "disk_space_gb": 10,
528
+ "supported_devices": ["CUDA", "MPS", "CPU"],
529
+ "performance_notes": "Requires base SDXL model + ControlNet model. Good for edge detection with SDXL quality."
530
+ }
531
+ },
532
+
533
+ "controlnet-depth-sdxl": {
534
+ "repo_id": "diffusers/controlnet-depth-sdxl-1.0",
535
+ "model_type": "controlnet_sdxl",
536
+ "base_model": "stable-diffusion-xl-base",
537
+ "controlnet_type": "depth",
538
+ "variant": "fp16",
539
+ "parameters": {
540
+ "num_inference_steps": 50,
541
+ "guidance_scale": 7.5,
542
+ "controlnet_conditioning_scale": 1.0
543
+ },
544
+ "hardware_requirements": {
545
+ "min_vram_gb": 8,
546
+ "recommended_vram_gb": 12,
547
+ "min_ram_gb": 16,
548
+ "recommended_ram_gb": 28,
549
+ "disk_space_gb": 10,
550
+ "supported_devices": ["CUDA", "MPS", "CPU"],
551
+ "performance_notes": "Requires base SDXL model + ControlNet model. Good for depth-based control with SDXL quality."
552
+ }
553
+ }
554
+ }
555
+
556
+ def _load_external_models(self):
557
+ """Load models from external configuration files"""
558
+ # Check for user-defined model configurations
559
+ config_paths = [
560
+ settings.config_dir / "models.json",
561
+ settings.config_dir / "models.yaml",
562
+ settings.config_dir / "models.yml",
563
+ Path.home() / ".ollamadiffuser" / "models.json",
564
+ Path.home() / ".ollamadiffuser" / "models.yaml",
565
+ Path.home() / ".ollamadiffuser" / "models.yml",
566
+ ]
567
+
568
+ # Also check environment variable for custom model config path
569
+ if "OLLAMADIFFUSER_MODEL_CONFIG" in os.environ:
570
+ config_paths.append(Path(os.environ["OLLAMADIFFUSER_MODEL_CONFIG"]))
571
+
572
+ for config_path in config_paths:
573
+ if config_path.exists():
574
+ try:
575
+ self._load_config_file(config_path)
576
+ except Exception as e:
577
+ import logging
578
+ logger = logging.getLogger(__name__)
579
+ logger.warning(f"Failed to load model config from {config_path}: {e}")
580
+
581
+ def _load_config_file(self, config_path: Path):
582
+ """Load models from a configuration file"""
583
+ with open(config_path, 'r', encoding='utf-8') as f:
584
+ if config_path.suffix.lower() == '.json':
585
+ data = json.load(f)
586
+ elif config_path.suffix.lower() in ['.yaml', '.yml']:
587
+ data = yaml.safe_load(f)
588
+ else:
589
+ raise ValueError(f"Unsupported config file format: {config_path.suffix}")
590
+
591
+ if 'models' in data:
592
+ for model_name, model_config in data['models'].items():
593
+ self._registry[model_name] = model_config
594
+
595
+ self._external_registries.append(str(config_path))
596
+
597
    def _refresh_external_api_models(self):
        """Refresh external API models cache.

        Replaces the cache wholesale; network failures inside
        _fetch_external_api_models degrade to an empty dict rather than raise.
        """
        self._external_api_models = self._fetch_external_api_models()
600
+
601
+ def _get_combined_models(self) -> Dict[str, Dict[str, Any]]:
602
+ """Get combined models (local + external API), with local taking precedence"""
603
+ combined_models = {}
604
+ combined_models.update(self._external_api_models) # Add external API models first
605
+ combined_models.update(self._registry) # Local models override external ones
606
+ return combined_models
607
+
608
    def get_all_models(self) -> Dict[str, Dict[str, Any]]:
        """Get all registered models including external API models.

        Returns a fresh merged dict; local definitions shadow external ones.
        """
        return self._get_combined_models()
611
+
612
+ def get_model(self, model_name: str) -> Optional[Dict[str, Any]]:
613
+ """Get a specific model configuration from local or external sources"""
614
+ # Check local registry first
615
+ if model_name in self._registry:
616
+ return self._registry[model_name]
617
+
618
+ # Check external API models
619
+ if model_name in self._external_api_models:
620
+ return self._external_api_models[model_name]
621
+
622
+ return None
623
+
624
+ def get_model_names(self) -> List[str]:
625
+ """Get list of all model names including external API models"""
626
+ return list(self._get_combined_models().keys())
627
+
628
+ def get_installed_models(self) -> Dict[str, Dict[str, Any]]:
629
+ """Get only actually installed models (from settings.models)"""
630
+ model_manager = self._get_model_manager()
631
+ if model_manager is None:
632
+ return {}
633
+
634
+ installed_models = {}
635
+ installed_model_names = model_manager.list_installed_models()
636
+
637
+ for model_name in installed_model_names:
638
+ model_config = self.get_model(model_name)
639
+ if model_config:
640
+ installed_models[model_name] = model_config
641
+
642
+ return installed_models
643
+
644
+ def get_available_models(self) -> Dict[str, Dict[str, Any]]:
645
+ """Get available but not installed models"""
646
+ model_manager = self._get_model_manager()
647
+ if model_manager is None:
648
+ return self._get_combined_models()
649
+
650
+ installed_model_names = set(model_manager.list_installed_models())
651
+ all_models = self._get_combined_models()
652
+
653
+ available_models = {}
654
+ for model_name, model_config in all_models.items():
655
+ if model_name not in installed_model_names:
656
+ available_models[model_name] = model_config
657
+
658
+ return available_models
659
+
660
+ def is_model_installed(self, model_name: str) -> bool:
661
+ """Check if a model is actually installed"""
662
+ model_manager = self._get_model_manager()
663
+ if model_manager is None:
664
+ return False
665
+ return model_manager.is_model_installed(model_name)
666
+
667
+ def add_model(self, model_name: str, model_config: Dict[str, Any]) -> bool:
668
+ """Add a new model to the local registry (runtime only)"""
669
+ try:
670
+ # Validate required fields
671
+ required_fields = ['repo_id', 'model_type']
672
+ for field in required_fields:
673
+ if field not in model_config:
674
+ raise ValueError(f"Missing required field: {field}")
675
+
676
+ self._registry[model_name] = model_config
677
+ return True
678
+ except Exception:
679
+ return False
680
+
681
+ def remove_model(self, model_name: str) -> bool:
682
+ """Remove a model from the local registry (runtime only)"""
683
+ if model_name in self._registry:
684
+ del self._registry[model_name]
685
+ return True
686
+ return False
687
+
688
    def reload(self):
        """Reload the model registry including external API models.

        Clears every cache first, then rebuilds in the same order as
        __init__: defaults, user config files, remote API.
        """
        self._registry.clear()
        self._external_registries.clear()
        self._external_api_models.clear()
        self._load_default_models()
        self._load_external_models()
        self._refresh_external_api_models()
696
+
697
+ def save_user_config(self, models: Dict[str, Dict[str, Any]], config_path: Optional[Path] = None):
698
+ """Save user-defined models to a configuration file"""
699
+ if config_path is None:
700
+ config_path = settings.config_dir / "models.json"
701
+
702
+ # Ensure config directory exists
703
+ config_path.parent.mkdir(parents=True, exist_ok=True)
704
+
705
+ config_data = {"models": models}
706
+
707
+ with open(config_path, 'w', encoding='utf-8') as f:
708
+ if config_path.suffix.lower() == '.json':
709
+ json.dump(config_data, f, indent=2, ensure_ascii=False)
710
+ elif config_path.suffix.lower() in ['.yaml', '.yml']:
711
+ yaml.safe_dump(config_data, f, default_flow_style=False, allow_unicode=True)
712
+
713
+ def get_external_registries(self) -> List[str]:
714
+ """Get list of external registry files that were loaded"""
715
+ return self._external_registries.copy()
716
+
717
    def refresh_external_models(self):
        """Manually refresh external API models.

        Public wrapper so callers can re-sync the remote catalogue without
        a full reload().
        """
        self._refresh_external_api_models()
720
+
721
+ def get_local_models_only(self) -> Dict[str, Dict[str, Any]]:
722
+ """Get only locally defined models (from registry, not necessarily installed)"""
723
+ return self._registry.copy()
724
+
725
+ def get_external_api_models_only(self) -> Dict[str, Dict[str, Any]]:
726
+ """Get only external API models"""
727
+ return self._external_api_models.copy()
728
+
729
+ def _fetch_external_api_models(self) -> Dict[str, Dict[str, Any]]:
730
+ """Fetch models from external API"""
731
+ try:
732
+ response = requests.get("https://www.ollamadiffuser.com/api/models", timeout=10)
733
+ if response.status_code == 200:
734
+ api_data = response.json()
735
+ # Expected format: {"models": {"model_name": {...}, ...}}
736
+ if "models" in api_data:
737
+ return api_data["models"]
738
+ else:
739
+ # If the API returns a different format, adapt accordingly
740
+ return api_data if isinstance(api_data, dict) else {}
741
+ else:
742
+ return {}
743
+ except Exception as e:
744
+ # Log the error but don't fail completely
745
+ import logging
746
+ logger = logging.getLogger(__name__)
747
+ logger.warning(f"Failed to fetch external API models: {e}")
748
+ return {}
749
+
750
    def get_all_models_with_external(self) -> Dict[str, Dict[str, Any]]:
        """Get all models including those from external API (deprecated - use get_all_models)"""
        # This method is now redundant since get_all_models includes external by default;
        # kept only for backward compatibility with older callers.
        return self.get_all_models()
754
+
755
+
756
# Global model registry instance.
# NOTE: constructing it at import time loads defaults, reads user config
# files, and performs a network fetch of the remote catalogue (best-effort).
model_registry = ModelRegistry()