ollamadiffuser 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,278 @@
1
+ from fastapi import FastAPI, Request, Form, File, UploadFile
2
+ from fastapi.responses import HTMLResponse, StreamingResponse
3
+ from fastapi.staticfiles import StaticFiles
4
+ from fastapi.templating import Jinja2Templates
5
+ import io
6
+ import base64
7
+ from pathlib import Path
8
+
9
+ from ..core.models.manager import model_manager
10
+ from ..core.utils.lora_manager import lora_manager
11
+
12
# Directory holding the Jinja2 HTML templates, resolved relative to this
# module file (not the process CWD) so rendering works however the package
# is installed or launched.
templates_dir = Path(__file__).parent / "templates"
templates = Jinja2Templates(directory=str(templates_dir))
15
+
16
def create_ui_app() -> FastAPI:
    """Create and configure the OllamaDiffuser Web UI application.

    Returns:
        FastAPI: app serving the HTML home page plus form-POST endpoints for
        image generation and model/LoRA load, unload, and download. Every
        endpoint re-renders ``index.html`` with the current state.
    """
    app = FastAPI(title="OllamaDiffuser Web UI")

    def _base_context(request: Request) -> dict:
        """Build the template context shared by every page render.

        Centralizes the model/LoRA state lookups that each handler would
        otherwise duplicate; state is re-read on every call so the page
        always reflects the latest manager state.
        """
        return {
            "request": request,
            "models": model_manager.list_available_models(),
            "installed_models": model_manager.list_installed_models(),
            "current_model": model_manager.get_current_model(),
            "model_loaded": model_manager.is_model_loaded(),
            "installed_loras": lora_manager.list_installed_loras(),
            "current_lora": lora_manager.get_current_lora(),
        }

    def _render_home(request: Request, **extra):
        """Render index.html with the shared context plus handler extras."""
        context = _base_context(request)
        context.update(extra)
        return templates.TemplateResponse("index.html", context)

    @app.get("/", response_class=HTMLResponse)
    async def home(request: Request):
        """Home page.

        Deliberately does NOT auto-load a model on startup — the user
        chooses when to commit memory via the management section.
        """
        return _render_home(request)

    @app.post("/generate")
    async def generate_image_ui(
        request: Request,
        prompt: str = Form(...),
        negative_prompt: str = Form("low quality, bad anatomy, worst quality, low resolution"),
        num_inference_steps: int = Form(28),
        guidance_scale: float = Form(3.5),
        width: int = Form(1024),
        height: int = Form(1024)
    ):
        """Generate an image from the submitted form (Web UI).

        On success the PNG is embedded in the page as base64; on failure the
        page is re-rendered with an error banner. The submitted form values
        are echoed back so the user does not lose their inputs.
        """
        error_message = None
        image_b64 = None

        try:
            # Require a model already resident in memory; generation never
            # triggers an implicit load.
            if not model_manager.is_model_loaded():
                error_message = "No model loaded. Please load a model first using the model management section above."

            if not error_message:
                engine = model_manager.loaded_model

                if engine is None:
                    error_message = "Model engine is not available. Please reload the model."
                else:
                    image = engine.generate_image(
                        prompt=prompt,
                        negative_prompt=negative_prompt,
                        num_inference_steps=num_inference_steps,
                        guidance_scale=guidance_scale,
                        width=width,
                        height=height
                    )

                    # Encode as base64 PNG so the template can inline it in
                    # an <img src="data:..."> tag.
                    img_buffer = io.BytesIO()
                    image.save(img_buffer, format='PNG')
                    img_buffer.seek(0)
                    image_b64 = base64.b64encode(img_buffer.getvalue()).decode()

        except Exception as e:
            error_message = f"Image generation failed: {str(e)}"

        return _render_home(
            request,
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            image_b64=image_b64,
            error_message=error_message,
        )

    @app.post("/load_model")
    async def load_model_ui(request: Request, model_name: str = Form(...)):
        """Load a model into memory (Web UI)."""
        success = False
        error_message = None

        try:
            if model_manager.load_model(model_name):
                success = True
            else:
                error_message = f"Failed to load model {model_name}"
        except Exception as e:
            error_message = f"Error loading model: {str(e)}"

        return _render_home(
            request,
            success_message=f"Model {model_name} loaded successfully!" if success else None,
            error_message=error_message,
        )

    @app.post("/unload_model")
    async def unload_model_ui(request: Request):
        """Unload the current model (Web UI)."""
        # Initialize both outcome messages up front instead of probing
        # locals() after the fact (the old pattern was fragile).
        success_message = None
        error_message = None

        try:
            # Capture the name BEFORE unloading so the banner can show it.
            current_model = model_manager.get_current_model()
            model_manager.unload_model()
            success_message = f"Model {current_model} unloaded successfully!" if current_model else "Model unloaded!"
        except Exception as e:
            error_message = f"Error unloading model: {str(e)}"

        return _render_home(
            request,
            success_message=success_message,
            error_message=error_message,
        )

    @app.post("/load_lora")
    async def load_lora_ui(request: Request, lora_name: str = Form(...), scale: float = Form(1.0)):
        """Apply an installed LoRA to the loaded model (Web UI)."""
        success = False
        error_message = None

        try:
            if lora_manager.load_lora(lora_name, scale=scale):
                success = True
            else:
                error_message = f"Failed to load LoRA {lora_name}"
        except Exception as e:
            error_message = f"Error loading LoRA: {str(e)}"

        return _render_home(
            request,
            success_message=f"LoRA {lora_name} loaded successfully with scale {scale}!" if success else None,
            error_message=error_message,
        )

    @app.post("/unload_lora")
    async def unload_lora_ui(request: Request):
        """Remove the currently applied LoRA (Web UI)."""
        success_message = None
        error_message = None

        try:
            # Capture the name BEFORE unloading so the banner can show it.
            current_lora_name = lora_manager.get_current_lora()
            lora_manager.unload_lora()
            success_message = f"LoRA {current_lora_name} unloaded successfully!" if current_lora_name else "LoRA unloaded!"
        except Exception as e:
            error_message = f"Error unloading LoRA: {str(e)}"

        return _render_home(
            request,
            success_message=success_message,
            error_message=error_message,
        )

    @app.post("/pull_lora")
    async def pull_lora_ui(request: Request, repo_id: str = Form(...), weight_name: str = Form(""), alias: str = Form("")):
        """Download a LoRA from the Hugging Face Hub (Web UI).

        Blank ``weight_name``/``alias`` form fields are treated as "not
        provided" and passed to the manager as None.
        """
        success_message = None
        error_message = None

        try:
            lora_alias = alias if alias.strip() else None
            weight_file = weight_name if weight_name.strip() else None

            if lora_manager.pull_lora(repo_id, weight_name=weight_file, alias=lora_alias):
                # The stored name is the alias when given, otherwise the
                # repo id with '/' flattened to '_'.
                final_name = lora_alias if lora_alias else repo_id.replace('/', '_')
                success_message = f"LoRA {final_name} downloaded successfully!"
            else:
                error_message = f"Failed to download LoRA {repo_id}"
        except Exception as e:
            error_message = f"Error downloading LoRA: {str(e)}"

        return _render_home(
            request,
            success_message=success_message,
            error_message=error_message,
        )

    return app
File without changes