isa-model 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49) hide show
  1. isa_model/config/__init__.py +9 -0
  2. isa_model/config/config_manager.py +213 -0
  3. isa_model/core/model_manager.py +5 -0
  4. isa_model/core/model_registry.py +39 -6
  5. isa_model/core/storage/supabase_storage.py +344 -0
  6. isa_model/core/vision_models_init.py +116 -0
  7. isa_model/deployment/cloud/__init__.py +9 -0
  8. isa_model/deployment/cloud/modal/__init__.py +10 -0
  9. isa_model/deployment/cloud/modal/isa_vision_doc_service.py +612 -0
  10. isa_model/deployment/cloud/modal/isa_vision_ui_service.py +305 -0
  11. isa_model/inference/ai_factory.py +238 -14
  12. isa_model/inference/providers/modal_provider.py +109 -0
  13. isa_model/inference/providers/yyds_provider.py +108 -0
  14. isa_model/inference/services/__init__.py +2 -1
  15. isa_model/inference/services/base_service.py +0 -38
  16. isa_model/inference/services/llm/base_llm_service.py +32 -0
  17. isa_model/inference/services/llm/llm_adapter.py +73 -3
  18. isa_model/inference/services/llm/ollama_llm_service.py +104 -3
  19. isa_model/inference/services/llm/openai_llm_service.py +67 -15
  20. isa_model/inference/services/llm/yyds_llm_service.py +254 -0
  21. isa_model/inference/services/stacked/__init__.py +26 -0
  22. isa_model/inference/services/stacked/base_stacked_service.py +269 -0
  23. isa_model/inference/services/stacked/config.py +426 -0
  24. isa_model/inference/services/stacked/doc_analysis_service.py +640 -0
  25. isa_model/inference/services/stacked/flux_professional_service.py +579 -0
  26. isa_model/inference/services/stacked/ui_analysis_service.py +1319 -0
  27. isa_model/inference/services/vision/base_image_gen_service.py +0 -34
  28. isa_model/inference/services/vision/base_vision_service.py +46 -2
  29. isa_model/inference/services/vision/isA_vision_service.py +402 -0
  30. isa_model/inference/services/vision/openai_vision_service.py +151 -9
  31. isa_model/inference/services/vision/replicate_image_gen_service.py +166 -38
  32. isa_model/inference/services/vision/replicate_vision_service.py +693 -0
  33. isa_model/serving/__init__.py +19 -0
  34. isa_model/serving/api/__init__.py +10 -0
  35. isa_model/serving/api/fastapi_server.py +84 -0
  36. isa_model/serving/api/middleware/__init__.py +9 -0
  37. isa_model/serving/api/middleware/request_logger.py +88 -0
  38. isa_model/serving/api/routes/__init__.py +5 -0
  39. isa_model/serving/api/routes/health.py +82 -0
  40. isa_model/serving/api/routes/llm.py +19 -0
  41. isa_model/serving/api/routes/ui_analysis.py +223 -0
  42. isa_model/serving/api/routes/vision.py +19 -0
  43. isa_model/serving/api/schemas/__init__.py +17 -0
  44. isa_model/serving/api/schemas/common.py +33 -0
  45. isa_model/serving/api/schemas/ui_analysis.py +78 -0
  46. {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/METADATA +1 -1
  47. {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/RECORD +49 -17
  48. {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/WHEEL +0 -0
  49. {isa_model-0.3.3.dist-info → isa_model-0.3.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,579 @@
1
+ """
2
+ FLUX Professional Pipeline Service
3
+ Multi-stage AI image generation with FLUX + ControlNet + LoRA + Upscaling
4
+ """
5
+
6
+ import asyncio
7
+ import logging
8
+ import base64
9
+ import io
10
+ from typing import Dict, Any, List, Optional
11
+ from PIL import Image
12
+
13
+ from .base_stacked_service import BaseStackedService, LayerConfig, LayerType, LayerResult
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ class FluxProfessionalService(BaseStackedService):
19
+ """
20
+ FLUX Professional Pipeline Service
21
+
22
+ 5-Stage Professional Image Generation:
23
+ 1. FLUX Base Generation - High-quality base image generation
24
+ 2. ControlNet Refinement - Precise composition and pose control
25
+ 3. LoRA Style Application - Custom style and character application
26
+ 4. Detail Enhancement - Face/detail restoration and refinement
27
+ 5. Ultimate Upscaling - Professional 4K/8K/16K upscaling
28
+ """
29
+
30
def __init__(self, ai_factory):
    """Create the pipeline service and register its five processing stages."""
    super().__init__(ai_factory, "FLUX Professional Pipeline")
    self._setup_layers()
33
+
34
def _setup_layers(self):
    """Setup the 5-stage FLUX professional pipeline

    Each stage is declared as a LayerConfig and chained via ``depends_on``,
    so the pipeline runs strictly in order:
    base generation -> ControlNet -> LoRA -> detail enhancement -> upscaling.
    Only the first stage is retried twice; later stages retry once because
    every one of them has a graceful fallback to the previous stage's image.
    """

    # Stage 1: FLUX Base Generation
    # Produces the initial 1024x1024 image; this is the only stage with no
    # dependency and the most generous retry budget.
    self.add_layer(LayerConfig(
        name="flux_base_generation",
        layer_type=LayerType.GENERATION,
        service_type="image_gen",
        model_name="flux-pro",
        parameters={
            "width": 1024,
            "height": 1024,
            "steps": 25,
            "guidance_scale": 3.5,
            "seed": -1  # presumably -1 means "random seed" — confirm against the image-gen service
        },
        depends_on=[],
        timeout=120.0,
        retry_count=2
    ))

    # Stage 2: ControlNet Refinement
    # Refines composition/pose using the stage-1 output as conditioning.
    self.add_layer(LayerConfig(
        name="controlnet_refinement",
        layer_type=LayerType.CONTROL,
        service_type="image_gen",
        model_name="flux-controlnet",
        parameters={
            "controlnet_type": "canny",  # canny, depth, hed
            "controlnet_conditioning_scale": 0.8,
            "control_guidance_start": 0.0,
            "control_guidance_end": 1.0
        },
        depends_on=["flux_base_generation"],
        timeout=90.0,
        retry_count=1
    ))

    # Stage 3: LoRA Style Application
    # Applies style (and optionally character-consistency) on top of the
    # refined image; a weight of 0.0 disables that LoRA entirely.
    self.add_layer(LayerConfig(
        name="lora_style_application",
        layer_type=LayerType.ENHANCEMENT,
        service_type="image_gen",
        model_name="flux-lora",
        parameters={
            "lora_models": ["realism", "anime", "art_style"],
            "lora_weights": [0.8, 0.0, 0.6],  # Mix multiple LoRAs (parallel to lora_models)
            "denoising_strength": 0.3
        },
        depends_on=["controlnet_refinement"],
        timeout=80.0,
        retry_count=1
    ))

    # Stage 4: Detail Enhancement (ADetailer)
    # Face detection + masked inpainting to restore fine facial detail.
    self.add_layer(LayerConfig(
        name="detail_enhancement",
        layer_type=LayerType.ENHANCEMENT,
        service_type="image_gen",
        model_name="adetailer",
        parameters={
            "face_detector": "mediapipe_face_full",
            "face_model": "face_yolov8n.pt",
            "restore_face": True,
            "denoising_strength": 0.4,
            "inpaint_only_masked": True
        },
        depends_on=["lora_style_application"],
        timeout=70.0,
        retry_count=1
    ))

    # Stage 5: Ultimate Upscaling
    # Tiled upscaling with seam fixing; tile/seam parameters are tuned for
    # 512px tiles. Longest timeout in the pipeline.
    self.add_layer(LayerConfig(
        name="ultimate_upscaling",
        layer_type=LayerType.UPSCALING,
        service_type="image_gen",
        model_name="ultimate-upscaler",
        parameters={
            "upscaler": "ESRGAN_4x",
            "scale_factor": 4,  # 4K upscaling
            "tile_width": 512,
            "tile_height": 512,
            "mask_blur": 8,
            "padding": 32,
            "seam_fix_mode": "Band Pass",
            "seam_fix_denoise": 0.35,
            "seam_fix_width": 64,
            "seam_fix_mask_blur": 8,
            "seam_fix_padding": 16
        },
        depends_on=["detail_enhancement"],
        timeout=300.0,  # Upscaling takes longer
        retry_count=1
    ))
129
+
130
async def initialize_services(self):
    """Initialize image generation services for FLUX pipeline

    Builds one service per unique (service_type, model_name) pair and caches
    it in ``self.services``; already-initialized keys are skipped.
    """
    for layer in self.layers:
        service_key = f"{layer.service_type}_{layer.model_name}"
        if service_key in self.services:
            continue  # already built for an earlier layer

        if layer.service_type != 'image_gen':
            raise ValueError(f"Unsupported service type: {layer.service_type}")

        # Pick the concrete backend by model name.
        if "flux" in layer.model_name:
            service = self.ai_factory.get_image_gen(
                model_name=layer.model_name,
                provider="replicate"  # or "modal" if we have flux on modal
            )
        elif layer.model_name == "ultimate-upscaler":
            service = self.ai_factory.get_image_gen(
                model_name="ultimate-sd-upscale",
                provider="replicate"
            )
        elif layer.model_name == "adetailer":
            service = self.ai_factory.get_image_gen(
                model_name="adetailer",
                provider="replicate"
            )
        else:
            # Default image generation service
            service = self.ai_factory.get_image_gen()

        self.services[service_key] = service
        logger.info(f"Initialized {service_key} service for FLUX pipeline")
161
+
162
async def execute_layer_logic(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Route a layer to its stage-specific executor by layer name."""
    handlers = {
        "flux_base_generation": self._execute_flux_base_generation,
        "controlnet_refinement": self._execute_controlnet_refinement,
        "lora_style_application": self._execute_lora_application,
        "detail_enhancement": self._execute_detail_enhancement,
        "ultimate_upscaling": self._execute_ultimate_upscaling,
    }
    handler = handlers.get(layer.name)
    if handler is None:
        raise ValueError(f"Unknown layer: {layer.name}")
    return await handler(layer, service, context)
182
+
183
async def _execute_flux_base_generation(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Execute FLUX base image generation.

    Enhances the caller's prompt/negative prompt with quality boosters and
    generates the stage-1 base image.

    Raises:
        ValueError: if ``context`` has no non-empty "prompt".
        RuntimeError: if the service returns no image URLs.
    """
    prompt = context.get("prompt", "")
    negative_prompt = context.get("negative_prompt", "")

    if not prompt:
        raise ValueError("Prompt is required for FLUX base generation")

    # Enhance prompt for professional quality
    enhanced_prompt = f"{prompt}, masterpiece, best quality, highly detailed, professional photography, 8k uhd"

    result = await service.generate_image(
        prompt=enhanced_prompt,
        negative_prompt=f"{negative_prompt}, low quality, blurry, artifacts, distorted",
        width=layer.parameters["width"],
        height=layer.parameters["height"],
        num_inference_steps=layer.parameters["steps"],
        guidance_scale=layer.parameters["guidance_scale"],
        seed=layer.parameters.get("seed", -1)
    )

    if not result or not result.get("urls"):
        # RuntimeError instead of bare Exception (callers catching Exception
        # still catch this); the string had a pointless f-prefix before.
        raise RuntimeError("FLUX base generation failed: no URLs returned")

    return {
        "image_url": result["urls"][0],
        "image_b64": result.get("image_b64"),
        "seed": result.get("seed"),
        "model_info": result.get("metadata", {})
    }
213
+
214
async def _execute_controlnet_refinement(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Execute ControlNet refinement for precise control.

    Runs an image-to-image pass over the stage-1 output; on failure the
    stage-1 result is returned unchanged so the pipeline can continue.

    NOTE(review): the original code computed a ``control_image`` (from
    ``context`` or falling back to the base image) but never passed it to
    the service, and the layer's ControlNet parameters (conditioning scale,
    guidance window) are likewise unused. The dead assignment is removed
    here; wiring the control image into the service call is a TODO that
    depends on the backend's API.
    """
    base_result = self.results["flux_base_generation"]
    base_image = base_result.data["image_url"]

    prompt = context.get("prompt", "")

    result = await service.image_to_image(
        prompt=prompt,
        init_image=base_image,
        num_inference_steps=20,
        guidance_scale=7.5
    )

    if not result or not result.get("urls"):
        # Fallback to base image if ControlNet fails
        logger.warning("ControlNet refinement failed, using base image")
        return base_result.data

    return {
        "image_url": result["urls"][0],
        "image_b64": result.get("image_b64"),
        "control_type": layer.parameters["controlnet_type"],
        "model_info": result.get("metadata", {})
    }
244
+
245
async def _execute_lora_application(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Execute LoRA style application with character consistency.

    Strategy order (first applicable wins; any raised error falls through
    to the standard image-to-image fallback at the bottom):
      1. InstantID            — face reference given and character_mode == "instant_id"
      2. Consistent character — face reference given and character_mode == "consistent_character"
      3. FLUX LoRA            — service exposes flux_lora_generation
      4. Standard image-to-image over the ControlNet-refined image

    Fix: the InstantID branch previously ignored ``character_mode``, so
    requesting "consistent_character" or "flux_lora" was shadowed whenever
    the service had ``instant_id_generation``. The branch is now gated on
    the mode; the default ("instant_id") keeps prior behavior.
    """
    refined_result = self.results["controlnet_refinement"]
    input_image = refined_result.data["image_url"]

    prompt = context.get("prompt", "")
    lora_style = context.get("lora_style", "realism")
    face_image = context.get("face_image")  # Character consistency reference
    character_mode = context.get("character_mode", "instant_id")  # instant_id, consistent_character, or flux_lora

    # Use character consistency if face image is provided and InstantID requested
    if face_image and character_mode == "instant_id" and hasattr(service, 'instant_id_generation'):
        logger.info("Using InstantID for character-consistent LoRA application")
        try:
            result = await service.instant_id_generation(
                prompt=f"{prompt}, {lora_style} style",
                face_image=face_image,
                identitynet_strength_ratio=0.8,
                adapter_strength_ratio=0.8,
                num_inference_steps=20,
                guidance_scale=5.0
            )

            if result.get("urls") and len(result["urls"]) > 0:
                return {
                    "image_url": result["urls"][0],
                    "image_b64": result.get("image_b64"),
                    "lora_applied": lora_style,
                    "character_consistency": "instant_id",
                    "model_info": result.get("model_info", {})
                }
        except Exception as e:
            logger.warning(f"InstantID generation failed: {e}, falling back to standard LoRA")

    # Use consistent character generation if specified
    elif face_image and character_mode == "consistent_character" and hasattr(service, 'consistent_character_generation'):
        logger.info("Using consistent character generation for LoRA application")
        try:
            result = await service.consistent_character_generation(
                subject=face_image,
                prompt=f"{prompt}, {lora_style} style",
                number_of_images=1
            )

            if result.get("urls") and len(result["urls"]) > 0:
                return {
                    "image_url": result["urls"][0],
                    "image_b64": result.get("image_b64"),
                    "lora_applied": lora_style,
                    "character_consistency": "consistent_character",
                    "model_info": result.get("model_info", {})
                }
        except Exception as e:
            logger.warning(f"Consistent character generation failed: {e}, falling back to standard LoRA")

    # Use FLUX LoRA generation if available
    elif hasattr(service, 'flux_lora_generation'):
        logger.info("Using FLUX LoRA generation")
        try:
            result = await service.flux_lora_generation(
                prompt=f"{prompt}, {lora_style} style",
                lora_scale=layer.parameters["lora_weights"][0],
                num_inference_steps=20,
                guidance_scale=3.5
            )

            if result.get("urls") and len(result["urls"]) > 0:
                return {
                    "image_url": result["urls"][0],
                    "image_b64": result.get("image_b64"),
                    "lora_applied": lora_style,
                    "generation_method": "flux_lora",
                    "model_info": result.get("model_info", {})
                }
        except Exception as e:
            logger.warning(f"FLUX LoRA generation failed: {e}, falling back to standard method")

    # Fallback to standard image generation
    logger.info("Using standard image generation for LoRA application")
    result = await service.image_to_image(
        prompt=f"{prompt}, {lora_style} style",
        init_image=input_image,
        strength=layer.parameters["denoising_strength"],
        num_inference_steps=15
    )

    if not result or not result.get("urls"):
        # Fallback to previous result if LoRA fails
        logger.warning("LoRA application failed, using refined image")
        return refined_result.data

    return {
        "image_url": result["urls"][0],
        "image_b64": result.get("image_b64"),
        "lora_applied": lora_style,
        "generation_method": "standard",
        "model_info": result.get("metadata", {})
    }
343
+
344
async def _execute_detail_enhancement(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Execute detail enhancement with ADetailer

    Runs a light image-to-image restoration pass over the LoRA-styled image;
    falls back to the styled image unchanged when the service yields no URLs.
    """
    styled_result = self.results["lora_style_application"]
    styled_url = styled_result.data["image_url"]

    response = await service.image_to_image(
        prompt="face restoration, detailed enhancement",
        init_image=styled_url,
        strength=layer.parameters["denoising_strength"],
        num_inference_steps=20
    )

    if not response or not response.get("urls"):
        # Keep the pipeline alive with the previous stage's output.
        logger.warning("Detail enhancement failed, using styled image")
        return styled_result.data

    return {
        "image_url": response["urls"][0],
        "image_b64": response.get("image_b64"),
        "faces_enhanced": 1,  # Assume face was enhanced
        "model_info": response.get("metadata", {})
    }
367
+
368
async def _execute_ultimate_upscaling(self, layer: LayerConfig, service: Any, context: Dict[str, Any]) -> Dict[str, Any]:
    """Execute ultimate upscaling for professional quality

    Prefers the service's dedicated ``ultimate_upscale`` method; otherwise
    (or if it raises) falls back to a light image-to-image pass, and finally
    to the stage-4 output unchanged.
    """
    enhanced_result = self.results["detail_enhancement"]
    source_url = enhanced_result.data["image_url"]

    # Caller may override the configured scale via context.
    scale_factor = context.get("upscale_factor", layer.parameters["scale_factor"])

    if hasattr(service, 'ultimate_upscale'):
        logger.info("Using ultimate_upscale method for professional upscaling")
        try:
            upscaled = await service.ultimate_upscale(
                image=source_url,
                scale=scale_factor,
                scheduler="K_EULER_ANCESTRAL",
                num_inference_steps=20,
                guidance_scale=10.0,
                strength=0.55,
                hdr=0.0
            )
            if upscaled.get("urls") and len(upscaled["urls"]) > 0:
                return {
                    "image_url": upscaled["urls"][0],
                    "image_b64": upscaled.get("image_b64"),
                    "upscale_factor": scale_factor,
                    "upscaling_method": "ultimate_sd",
                    "final_resolution": upscaled.get("resolution"),
                    "model_info": upscaled.get("model_info", {})
                }
        except Exception as e:
            logger.warning(f"Ultimate upscale method failed: {e}, falling back to standard invoke")

    # Fallback path: plain image-to-image with a light denoise.
    logger.info("Using standard image_to_image for upscaling")
    upscaled = await service.image_to_image(
        prompt="high resolution, ultra detailed, 4k quality",
        init_image=source_url,
        strength=0.3,  # Light modification for upscaling
        num_inference_steps=25
    )

    if not upscaled or not upscaled.get("urls"):
        # Last resort: hand back the detail-enhanced image untouched.
        logger.warning("Ultimate upscaling failed, using enhanced image")
        return enhanced_result.data

    return {
        "image_url": upscaled["urls"][0],
        "image_b64": upscaled.get("image_b64"),
        "upscale_factor": scale_factor,
        "upscaling_method": "standard",
        "final_resolution": f"enhanced_{scale_factor}x",
        "model_info": upscaled.get("metadata", {})
    }
423
+
424
async def invoke(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute the complete FLUX Professional Pipeline

    Args:
        context: {
            "prompt": str,           # Required: Main generation prompt
            "negative_prompt": str,  # Optional: Negative prompt
            "control_image": str,    # Optional: Control image URL/path
            "lora_style": str,       # Optional: LoRA style ("realism", "anime", "art_style")
            "face_image": str,       # Optional: Reference face for character consistency
            "character_mode": str,   # Optional: Character consistency mode ("instant_id", "consistent_character", "flux_lora")
            "upscale_factor": int,   # Optional: Upscaling factor (2, 4, 8)
            "width": int,            # Optional: Base width (default 1024)
            "height": int            # Optional: Base height (default 1024)
        }

    Returns:
        Dict with professional quality image generation results including character consistency.
        On failure the dict has success=False plus the error and any partial layer results;
        exceptions are caught internally and never propagate to the caller.
    """
    start_time = asyncio.get_event_loop().time()

    try:
        # Validate input
        if not context.get("prompt"):
            raise ValueError("Prompt is required for FLUX Professional Pipeline")

        # Initialize services
        await self.initialize_services()

        # Execute layers in sequence; previous runs' results are discarded.
        self.results.clear()

        for layer in self.layers:
            logger.info(f"Executing layer: {layer.name}")

            result = await self.execute_layer(layer, context)
            self.results[layer.name] = result

            if not result.success:
                logger.error(f"Layer {layer.name} failed: {result.error}")
                # Only abort the pipeline when the layer has no fallback;
                # otherwise later stages run against the best prior output.
                if not layer.fallback_enabled:
                    break

        # Build final result
        total_time = asyncio.get_event_loop().time() - start_time

        # Get the final high-quality image: prefer the last stage, then walk
        # back through earlier stages until a successful one is found.
        final_result = self.results.get("ultimate_upscaling")
        if not final_result or not final_result.success:
            # Try previous layers as fallback
            for layer_name in ["detail_enhancement", "lora_style_application", "controlnet_refinement", "flux_base_generation"]:
                fallback_result = self.results.get(layer_name)
                if fallback_result and fallback_result.success:
                    final_result = fallback_result
                    break

        if not final_result or not final_result.success:
            # Every stage failed — report failure with whatever partials exist.
            return {
                "success": False,
                "error": "All pipeline stages failed",
                "service": self.service_name,
                "total_execution_time": total_time,
                "layer_results": {name: result for name, result in self.results.items()}
            }

        return {
            "success": True,
            "service": self.service_name,
            "total_execution_time": total_time,
            "final_output": {
                "image_url": final_result.data["image_url"],
                "image_b64": final_result.data.get("image_b64"),
                "final_resolution": final_result.data.get("final_resolution"),
                "generation_info": {
                    "prompt": context["prompt"],
                    "stages_completed": len([r for r in self.results.values() if r.success]),
                    "total_stages": len(self.layers),
                    "pipeline": "FLUX Professional"
                }
            },
            "layer_results": {name: result for name, result in self.results.items()},
            "performance_metrics": self.get_performance_metrics()
        }

    except Exception as e:
        # Top-level boundary: convert any error (including the prompt
        # validation above) into a structured failure response.
        total_time = asyncio.get_event_loop().time() - start_time
        logger.error(f"FLUX Professional Pipeline failed: {e}")

        return {
            "success": False,
            "error": str(e),
            "service": self.service_name,
            "total_execution_time": total_time,
            "layer_results": {name: result for name, result in self.results.items()}
        }
520
+
521
def get_performance_metrics(self) -> Dict[str, Any]:
    """Summarize per-layer timing and success counts for the last run."""
    completed = list(self.results.values())
    configured = len(self.layers)
    succeeded = sum(1 for r in completed if r.success)
    elapsed = sum(r.execution_time for r in completed)

    return {
        "total_layers": configured,
        "successful_layers": succeeded,
        "failed_layers": configured - succeeded,
        "total_execution_time": elapsed,
        "layer_times": {r.layer_name: r.execution_time for r in completed},
        "pipeline_type": "FLUX Professional",
        # max(..., 1) guards against division by zero before any run.
        "average_layer_time": elapsed / max(len(completed), 1)
    }
540
+
541
async def execute_fallback(self, layer: LayerConfig, context: Dict[str, Any], error: str) -> Optional[LayerResult]:
    """Execute fallback logic for failed layers

    Only GENERATION layers have a fallback (a cheaper flux-schnell run);
    every other layer type returns None so the caller keeps the failure.
    """
    logger.info(f"Executing fallback for layer {layer.name}")

    if layer.layer_type != LayerType.GENERATION:
        return None

    # Fallback to basic SDXL if FLUX fails
    try:
        basic_service = self.ai_factory.get_image_gen(model_name="black-forest-labs/flux-schnell")
        outcome = await basic_service.generate_image(
            prompt=context.get("prompt", ""),
            width=1024,
            height=1024
        )
    except Exception as e:
        logger.error(f"Fallback for {layer.name} also failed: {e}")
        return None

    if not outcome or not outcome.get("urls"):
        return None

    return LayerResult(
        layer_name=f"{layer.name}_fallback",
        success=True,
        data={
            "image_url": outcome["urls"][0],
            "image_b64": outcome.get("image_b64"),
            "model_info": outcome.get("metadata", {})
        },
        metadata={"fallback": True, "original_error": error},
        execution_time=0.0
    )
573
+
574
async def close(self):
    """Release every underlying service that exposes a close() hook."""
    for svc in self.services.values():
        closer = getattr(svc, 'close', None)
        if closer is not None:
            await closer()
    logger.info(f"Closed {self.service_name}")