ollamadiffuser 1.2.2__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ollamadiffuser/__init__.py +1 -1
- ollamadiffuser/api/server.py +312 -312
- ollamadiffuser/cli/config_commands.py +119 -0
- ollamadiffuser/cli/lora_commands.py +169 -0
- ollamadiffuser/cli/main.py +85 -1233
- ollamadiffuser/cli/model_commands.py +664 -0
- ollamadiffuser/cli/recommend_command.py +205 -0
- ollamadiffuser/cli/registry_commands.py +197 -0
- ollamadiffuser/core/config/model_registry.py +562 -11
- ollamadiffuser/core/config/settings.py +24 -2
- ollamadiffuser/core/inference/__init__.py +5 -0
- ollamadiffuser/core/inference/base.py +182 -0
- ollamadiffuser/core/inference/engine.py +204 -1405
- ollamadiffuser/core/inference/strategies/__init__.py +1 -0
- ollamadiffuser/core/inference/strategies/controlnet_strategy.py +170 -0
- ollamadiffuser/core/inference/strategies/flux_strategy.py +136 -0
- ollamadiffuser/core/inference/strategies/generic_strategy.py +164 -0
- ollamadiffuser/core/inference/strategies/gguf_strategy.py +113 -0
- ollamadiffuser/core/inference/strategies/hidream_strategy.py +104 -0
- ollamadiffuser/core/inference/strategies/sd15_strategy.py +134 -0
- ollamadiffuser/core/inference/strategies/sd3_strategy.py +80 -0
- ollamadiffuser/core/inference/strategies/sdxl_strategy.py +131 -0
- ollamadiffuser/core/inference/strategies/video_strategy.py +108 -0
- ollamadiffuser/mcp/__init__.py +0 -0
- ollamadiffuser/mcp/server.py +184 -0
- ollamadiffuser/ui/templates/index.html +62 -1
- ollamadiffuser/ui/web.py +116 -54
- {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/METADATA +337 -108
- ollamadiffuser-2.0.0.dist-info/RECORD +61 -0
- {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/WHEEL +1 -1
- {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/entry_points.txt +1 -0
- ollamadiffuser/core/models/registry.py +0 -384
- ollamadiffuser/ui/samples/.DS_Store +0 -0
- ollamadiffuser-1.2.2.dist-info/RECORD +0 -45
- {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/licenses/LICENSE +0 -0
- {ollamadiffuser-1.2.2.dist-info → ollamadiffuser-2.0.0.dist-info}/top_level.txt +0 -0
|
@@ -402,7 +402,7 @@
|
|
|
402
402
|
<div class="container">
|
|
403
403
|
<div class="header">
|
|
404
404
|
<h1>🎨 OllamaDiffuser</h1>
|
|
405
|
-
<p>Image Generation Model Management Tool</p>
|
|
405
|
+
<p>Image Generation Model Management Tool — v2.0.0</p>
|
|
406
406
|
</div>
|
|
407
407
|
|
|
408
408
|
<div class="status-bar">
|
|
@@ -603,6 +603,12 @@
|
|
|
603
603
|
{% endif %}
|
|
604
604
|
</div>
|
|
605
605
|
</div>
|
|
606
|
+
|
|
607
|
+
<div class="form-group">
|
|
608
|
+
<label for="seed">Seed (optional, leave empty for random)</label>
|
|
609
|
+
<input type="number" name="seed" id="seed" value="{{ seed or '' }}" min="0" max="4294967295" placeholder="Random">
|
|
610
|
+
<small class="help-text">Set a seed for reproducible results</small>
|
|
611
|
+
</div>
|
|
606
612
|
|
|
607
613
|
<!-- ControlNet Section -->
|
|
608
614
|
{% if is_controlnet_model %}
|
|
@@ -691,6 +697,46 @@
|
|
|
691
697
|
🎨 Generate Image
|
|
692
698
|
</button>
|
|
693
699
|
</form>
|
|
700
|
+
|
|
701
|
+
<!-- img2img Section -->
|
|
702
|
+
<details style="margin-top: 20px;">
|
|
703
|
+
<summary style="cursor: pointer; font-weight: 600; color: #374151; padding: 10px 0;">🖼️ Image-to-Image (img2img)</summary>
|
|
704
|
+
<form method="post" action="/generate/img2img" enctype="multipart/form-data" style="margin-top: 10px;">
|
|
705
|
+
<div class="form-group">
|
|
706
|
+
<label for="img2img_prompt">Prompt</label>
|
|
707
|
+
<textarea name="prompt" id="img2img_prompt" rows="2" placeholder="Describe the desired output..." required></textarea>
|
|
708
|
+
</div>
|
|
709
|
+
<div class="form-group">
|
|
710
|
+
<label for="img2img_image">Input Image</label>
|
|
711
|
+
<input type="file" name="image" id="img2img_image" accept="image/*" class="file-input" required>
|
|
712
|
+
</div>
|
|
713
|
+
<div class="form-row">
|
|
714
|
+
<div class="form-group">
|
|
715
|
+
<label for="img2img_strength">Strength</label>
|
|
716
|
+
<input type="number" name="strength" id="img2img_strength" value="0.75" min="0.0" max="1.0" step="0.05">
|
|
717
|
+
<small class="help-text">Higher = more change from original</small>
|
|
718
|
+
</div>
|
|
719
|
+
<div class="form-group">
|
|
720
|
+
<label for="img2img_steps">Steps</label>
|
|
721
|
+
<input type="number" name="num_inference_steps" id="img2img_steps" value="{{ model_parameters.get('num_inference_steps', 28) }}" min="1" max="100">
|
|
722
|
+
</div>
|
|
723
|
+
</div>
|
|
724
|
+
<div class="form-row">
|
|
725
|
+
<div class="form-group">
|
|
726
|
+
<label for="img2img_guidance">Guidance Scale</label>
|
|
727
|
+
<input type="number" name="guidance_scale" id="img2img_guidance" value="{{ model_parameters.get('guidance_scale', 3.5) }}" min="0" max="20" step="0.1">
|
|
728
|
+
</div>
|
|
729
|
+
<div class="form-group">
|
|
730
|
+
<label for="img2img_seed">Seed</label>
|
|
731
|
+
<input type="number" name="seed" id="img2img_seed" placeholder="Random" min="0" max="4294967295">
|
|
732
|
+
</div>
|
|
733
|
+
</div>
|
|
734
|
+
<input type="hidden" name="negative_prompt" value="low quality, bad anatomy, worst quality, low resolution">
|
|
735
|
+
<button type="submit" class="btn" {{ 'disabled' if not model_loaded }}>
|
|
736
|
+
🖼️ Generate img2img
|
|
737
|
+
</button>
|
|
738
|
+
</form>
|
|
739
|
+
</details>
|
|
694
740
|
</div>
|
|
695
741
|
|
|
696
742
|
<div class="result-section">
|
|
@@ -722,6 +768,21 @@
|
|
|
722
768
|
<p style="color: #6b7280; margin-top: 15px; text-align: center;">
|
|
723
769
|
<strong>Prompt:</strong> {{ prompt }}
|
|
724
770
|
</p>
|
|
771
|
+
{% elif input_image_b64 and image_b64 %}
|
|
772
|
+
<!-- img2img Results -->
|
|
773
|
+
<div class="controlnet-results">
|
|
774
|
+
<div>
|
|
775
|
+
<h3 style="color: #374151; margin-bottom: 10px; text-align: center;">🖼️ Input Image</h3>
|
|
776
|
+
<img src="data:image/png;base64,{{ input_image_b64 }}" alt="Input Image" class="result-image">
|
|
777
|
+
</div>
|
|
778
|
+
<div>
|
|
779
|
+
<h3 style="color: #374151; margin-bottom: 10px; text-align: center;">🎨 Generated Image</h3>
|
|
780
|
+
<img src="data:image/png;base64,{{ image_b64 }}" alt="Generated Image" class="result-image">
|
|
781
|
+
</div>
|
|
782
|
+
</div>
|
|
783
|
+
<p style="color: #6b7280; margin-top: 15px; text-align: center;">
|
|
784
|
+
<strong>Prompt:</strong> {{ prompt }}
|
|
785
|
+
</p>
|
|
725
786
|
{% elif image_b64 %}
|
|
726
787
|
<!-- Regular Generation Result -->
|
|
727
788
|
<img src="data:image/png;base64,{{ image_b64 }}" alt="Generated Image" class="result-image">
|
ollamadiffuser/ui/web.py
CHANGED
|
@@ -1,12 +1,15 @@
|
|
|
1
|
-
|
|
2
|
-
from fastapi.responses import HTMLResponse, StreamingResponse
|
|
3
|
-
from fastapi.staticfiles import StaticFiles
|
|
4
|
-
from fastapi.templating import Jinja2Templates
|
|
1
|
+
import asyncio
|
|
5
2
|
import io
|
|
6
3
|
import base64
|
|
7
4
|
import logging
|
|
8
5
|
import json
|
|
9
6
|
from pathlib import Path
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
from fastapi import FastAPI, Request, Form, File, UploadFile
|
|
10
|
+
from fastapi.responses import HTMLResponse, StreamingResponse
|
|
11
|
+
from fastapi.staticfiles import StaticFiles
|
|
12
|
+
from fastapi.templating import Jinja2Templates
|
|
10
13
|
from PIL import Image
|
|
11
14
|
|
|
12
15
|
from ..core.models.manager import model_manager
|
|
@@ -364,72 +367,65 @@ def create_ui_app() -> FastAPI:
|
|
|
364
367
|
guidance_scale: float = Form(3.5),
|
|
365
368
|
width: int = Form(1024),
|
|
366
369
|
height: int = Form(1024),
|
|
370
|
+
seed: Optional[int] = Form(None),
|
|
367
371
|
control_image: UploadFile = File(None),
|
|
368
372
|
controlnet_conditioning_scale: float = Form(1.0),
|
|
369
373
|
control_guidance_start: float = Form(0.0),
|
|
370
|
-
control_guidance_end: float = Form(1.0)
|
|
374
|
+
control_guidance_end: float = Form(1.0),
|
|
371
375
|
):
|
|
372
|
-
"""Generate image (Web UI)"""
|
|
376
|
+
"""Generate image (Web UI) - runs generation in thread pool"""
|
|
373
377
|
error_message = None
|
|
374
378
|
image_b64 = None
|
|
375
379
|
control_image_b64 = None
|
|
376
|
-
|
|
380
|
+
|
|
377
381
|
try:
|
|
378
|
-
# Check if model is actually loaded in memory
|
|
379
382
|
if not model_manager.is_model_loaded():
|
|
380
383
|
error_message = "No model loaded. Please load a model first using the model management section above."
|
|
381
|
-
|
|
384
|
+
|
|
382
385
|
if not error_message:
|
|
383
|
-
# Get inference engine
|
|
384
386
|
engine = model_manager.loaded_model
|
|
385
|
-
|
|
386
387
|
if engine is None:
|
|
387
388
|
error_message = "Model engine is not available. Please reload the model."
|
|
388
389
|
else:
|
|
389
390
|
# Process control image if provided
|
|
390
391
|
control_image_pil = None
|
|
391
392
|
if control_image and control_image.filename:
|
|
392
|
-
# Initialize ControlNet preprocessors if needed
|
|
393
393
|
if not controlnet_preprocessor.is_initialized():
|
|
394
|
-
logger.info("Initializing ControlNet preprocessors for image processing...")
|
|
395
394
|
if not controlnet_preprocessor.initialize():
|
|
396
|
-
error_message = "Failed to initialize ControlNet preprocessors."
|
|
397
|
-
|
|
395
|
+
error_message = "Failed to initialize ControlNet preprocessors."
|
|
396
|
+
|
|
398
397
|
if not error_message:
|
|
399
|
-
# Read uploaded image
|
|
400
398
|
image_data = await control_image.read()
|
|
401
399
|
control_image_pil = Image.open(io.BytesIO(image_data)).convert('RGB')
|
|
402
|
-
|
|
403
|
-
# Convert control image to base64 for display
|
|
404
400
|
img_buffer = io.BytesIO()
|
|
405
401
|
control_image_pil.save(img_buffer, format='PNG')
|
|
406
402
|
img_buffer.seek(0)
|
|
407
403
|
control_image_b64 = base64.b64encode(img_buffer.getvalue()).decode()
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
404
|
+
|
|
405
|
+
if not error_message:
|
|
406
|
+
image = await asyncio.to_thread(
|
|
407
|
+
engine.generate_image,
|
|
408
|
+
prompt=prompt,
|
|
409
|
+
negative_prompt=negative_prompt,
|
|
410
|
+
num_inference_steps=num_inference_steps,
|
|
411
|
+
guidance_scale=guidance_scale,
|
|
412
|
+
width=width,
|
|
413
|
+
height=height,
|
|
414
|
+
seed=seed,
|
|
415
|
+
control_image=control_image_pil,
|
|
416
|
+
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
|
417
|
+
control_guidance_start=control_guidance_start,
|
|
418
|
+
control_guidance_end=control_guidance_end,
|
|
419
|
+
)
|
|
420
|
+
|
|
421
|
+
img_buffer = io.BytesIO()
|
|
422
|
+
image.save(img_buffer, format='PNG')
|
|
423
|
+
img_buffer.seek(0)
|
|
424
|
+
image_b64 = base64.b64encode(img_buffer.getvalue()).decode()
|
|
425
|
+
|
|
429
426
|
except Exception as e:
|
|
430
427
|
error_message = f"Image generation failed: {str(e)}"
|
|
431
|
-
|
|
432
|
-
# Return result page
|
|
428
|
+
|
|
433
429
|
context = get_template_context(request)
|
|
434
430
|
context.update({
|
|
435
431
|
"prompt": prompt,
|
|
@@ -438,14 +434,80 @@ def create_ui_app() -> FastAPI:
|
|
|
438
434
|
"guidance_scale": guidance_scale,
|
|
439
435
|
"width": width,
|
|
440
436
|
"height": height,
|
|
437
|
+
"seed": seed,
|
|
441
438
|
"controlnet_conditioning_scale": controlnet_conditioning_scale,
|
|
442
439
|
"control_guidance_start": control_guidance_start,
|
|
443
440
|
"control_guidance_end": control_guidance_end,
|
|
444
441
|
"image_b64": image_b64,
|
|
445
442
|
"control_image_b64": control_image_b64,
|
|
446
|
-
"error_message": error_message
|
|
443
|
+
"error_message": error_message,
|
|
447
444
|
})
|
|
448
|
-
|
|
445
|
+
|
|
446
|
+
return templates.TemplateResponse("index.html", context)
|
|
447
|
+
|
|
448
|
+
@app.post("/generate/img2img")
|
|
449
|
+
async def generate_img2img_ui(
|
|
450
|
+
request: Request,
|
|
451
|
+
prompt: str = Form(...),
|
|
452
|
+
negative_prompt: str = Form("low quality, bad anatomy, worst quality, low resolution"),
|
|
453
|
+
num_inference_steps: int = Form(28),
|
|
454
|
+
guidance_scale: float = Form(3.5),
|
|
455
|
+
seed: Optional[int] = Form(None),
|
|
456
|
+
strength: float = Form(0.75),
|
|
457
|
+
image: UploadFile = File(...),
|
|
458
|
+
):
|
|
459
|
+
"""Image-to-image generation (Web UI)"""
|
|
460
|
+
error_message = None
|
|
461
|
+
image_b64 = None
|
|
462
|
+
input_image_b64 = None
|
|
463
|
+
|
|
464
|
+
try:
|
|
465
|
+
if not model_manager.is_model_loaded():
|
|
466
|
+
error_message = "No model loaded."
|
|
467
|
+
else:
|
|
468
|
+
engine = model_manager.loaded_model
|
|
469
|
+
image_data = await image.read()
|
|
470
|
+
input_image = Image.open(io.BytesIO(image_data)).convert('RGB')
|
|
471
|
+
|
|
472
|
+
# Show the input image
|
|
473
|
+
buf = io.BytesIO()
|
|
474
|
+
input_image.save(buf, format='PNG')
|
|
475
|
+
buf.seek(0)
|
|
476
|
+
input_image_b64 = base64.b64encode(buf.getvalue()).decode()
|
|
477
|
+
|
|
478
|
+
result = await asyncio.to_thread(
|
|
479
|
+
engine.generate_image,
|
|
480
|
+
prompt=prompt,
|
|
481
|
+
negative_prompt=negative_prompt,
|
|
482
|
+
num_inference_steps=num_inference_steps,
|
|
483
|
+
guidance_scale=guidance_scale,
|
|
484
|
+
width=input_image.width,
|
|
485
|
+
height=input_image.height,
|
|
486
|
+
seed=seed,
|
|
487
|
+
image=input_image,
|
|
488
|
+
strength=strength,
|
|
489
|
+
)
|
|
490
|
+
|
|
491
|
+
buf = io.BytesIO()
|
|
492
|
+
result.save(buf, format='PNG')
|
|
493
|
+
buf.seek(0)
|
|
494
|
+
image_b64 = base64.b64encode(buf.getvalue()).decode()
|
|
495
|
+
|
|
496
|
+
except Exception as e:
|
|
497
|
+
error_message = f"img2img generation failed: {str(e)}"
|
|
498
|
+
|
|
499
|
+
context = get_template_context(request)
|
|
500
|
+
context.update({
|
|
501
|
+
"prompt": prompt,
|
|
502
|
+
"negative_prompt": negative_prompt,
|
|
503
|
+
"num_inference_steps": num_inference_steps,
|
|
504
|
+
"guidance_scale": guidance_scale,
|
|
505
|
+
"seed": seed,
|
|
506
|
+
"image_b64": image_b64,
|
|
507
|
+
"input_image_b64": input_image_b64,
|
|
508
|
+
"error_message": error_message,
|
|
509
|
+
})
|
|
510
|
+
|
|
449
511
|
return templates.TemplateResponse("index.html", context)
|
|
450
512
|
|
|
451
513
|
@app.post("/preprocess_control_image")
|
|
@@ -482,25 +544,24 @@ def create_ui_app() -> FastAPI:
|
|
|
482
544
|
|
|
483
545
|
@app.post("/load_model")
|
|
484
546
|
async def load_model_ui(request: Request, model_name: str = Form(...)):
|
|
485
|
-
"""Load model (Web UI)"""
|
|
547
|
+
"""Load model (Web UI) - runs loading in thread pool"""
|
|
486
548
|
success = False
|
|
487
549
|
error_message = None
|
|
488
|
-
|
|
550
|
+
|
|
489
551
|
try:
|
|
490
|
-
if model_manager.load_model(model_name):
|
|
552
|
+
if await asyncio.to_thread(model_manager.load_model, model_name):
|
|
491
553
|
success = True
|
|
492
554
|
else:
|
|
493
555
|
error_message = f"Failed to load model {model_name}"
|
|
494
556
|
except Exception as e:
|
|
495
557
|
error_message = f"Error loading model: {str(e)}"
|
|
496
|
-
|
|
497
|
-
# Return result page
|
|
558
|
+
|
|
498
559
|
context = get_template_context(request)
|
|
499
560
|
context.update({
|
|
500
561
|
"success_message": f"Model {model_name} loaded successfully!" if success else None,
|
|
501
|
-
"error_message": error_message
|
|
562
|
+
"error_message": error_message,
|
|
502
563
|
})
|
|
503
|
-
|
|
564
|
+
|
|
504
565
|
return templates.TemplateResponse("index.html", context)
|
|
505
566
|
|
|
506
567
|
@app.post("/unload_model")
|
|
@@ -573,12 +634,13 @@ def create_ui_app() -> FastAPI:
|
|
|
573
634
|
"""Pull LoRA from Hugging Face Hub (Web UI)"""
|
|
574
635
|
success = False
|
|
575
636
|
error_message = None
|
|
576
|
-
|
|
637
|
+
final_name = repo_id
|
|
638
|
+
|
|
577
639
|
try:
|
|
578
640
|
# Use alias if provided, otherwise use repo_id
|
|
579
641
|
lora_alias = alias if alias.strip() else None
|
|
580
642
|
weight_file = weight_name if weight_name.strip() else None
|
|
581
|
-
|
|
643
|
+
|
|
582
644
|
if lora_manager.pull_lora(repo_id, weight_name=weight_file, alias=lora_alias):
|
|
583
645
|
success = True
|
|
584
646
|
final_name = lora_alias if lora_alias else repo_id.replace('/', '_')
|
|
@@ -586,11 +648,11 @@ def create_ui_app() -> FastAPI:
|
|
|
586
648
|
error_message = f"Failed to download LoRA {repo_id}"
|
|
587
649
|
except Exception as e:
|
|
588
650
|
error_message = f"Error downloading LoRA: {str(e)}"
|
|
589
|
-
|
|
651
|
+
|
|
590
652
|
# Return result page
|
|
591
653
|
context = get_template_context(request)
|
|
592
654
|
context.update({
|
|
593
|
-
"success_message": f"LoRA {final_name} downloaded successfully!" if success else None,
|
|
655
|
+
"success_message": f"LoRA {final_name} downloaded successfully!" if success else None,
|
|
594
656
|
"error_message": error_message
|
|
595
657
|
})
|
|
596
658
|
|