isa-model 0.3.91__py3-none-any.whl → 0.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isa_model/client.py +1166 -584
- isa_model/core/cache/redis_cache.py +410 -0
- isa_model/core/config/config_manager.py +282 -12
- isa_model/core/config.py +91 -1
- isa_model/core/database/__init__.py +1 -0
- isa_model/core/database/direct_db_client.py +114 -0
- isa_model/core/database/migration_manager.py +563 -0
- isa_model/core/database/migrations.py +297 -0
- isa_model/core/database/supabase_client.py +258 -0
- isa_model/core/dependencies.py +316 -0
- isa_model/core/discovery/__init__.py +19 -0
- isa_model/core/discovery/consul_discovery.py +190 -0
- isa_model/core/logging/__init__.py +54 -0
- isa_model/core/logging/influx_logger.py +523 -0
- isa_model/core/logging/loki_logger.py +160 -0
- isa_model/core/models/__init__.py +46 -0
- isa_model/core/models/config_models.py +625 -0
- isa_model/core/models/deployment_billing_tracker.py +430 -0
- isa_model/core/models/model_billing_tracker.py +60 -88
- isa_model/core/models/model_manager.py +66 -25
- isa_model/core/models/model_metadata.py +690 -0
- isa_model/core/models/model_repo.py +217 -55
- isa_model/core/models/model_statistics_tracker.py +234 -0
- isa_model/core/models/model_storage.py +0 -1
- isa_model/core/models/model_version_manager.py +959 -0
- isa_model/core/models/system_models.py +857 -0
- isa_model/core/pricing_manager.py +2 -249
- isa_model/core/repositories/__init__.py +9 -0
- isa_model/core/repositories/config_repository.py +912 -0
- isa_model/core/resilience/circuit_breaker.py +366 -0
- isa_model/core/security/secrets.py +358 -0
- isa_model/core/services/__init__.py +2 -4
- isa_model/core/services/intelligent_model_selector.py +479 -370
- isa_model/core/storage/hf_storage.py +2 -2
- isa_model/core/types.py +8 -0
- isa_model/deployment/__init__.py +5 -48
- isa_model/deployment/core/__init__.py +2 -31
- isa_model/deployment/core/deployment_manager.py +1278 -368
- isa_model/deployment/local/__init__.py +31 -0
- isa_model/deployment/local/config.py +248 -0
- isa_model/deployment/local/gpu_gateway.py +607 -0
- isa_model/deployment/local/health_checker.py +428 -0
- isa_model/deployment/local/provider.py +586 -0
- isa_model/deployment/local/tensorrt_service.py +621 -0
- isa_model/deployment/local/transformers_service.py +644 -0
- isa_model/deployment/local/vllm_service.py +527 -0
- isa_model/deployment/modal/__init__.py +8 -0
- isa_model/deployment/modal/config.py +136 -0
- isa_model/deployment/modal/deployer.py +894 -0
- isa_model/deployment/modal/services/__init__.py +3 -0
- isa_model/deployment/modal/services/audio/__init__.py +1 -0
- isa_model/deployment/modal/services/audio/isa_audio_chatTTS_service.py +520 -0
- isa_model/deployment/modal/services/audio/isa_audio_openvoice_service.py +758 -0
- isa_model/deployment/modal/services/audio/isa_audio_service_v2.py +1044 -0
- isa_model/deployment/modal/services/embedding/__init__.py +1 -0
- isa_model/deployment/modal/services/embedding/isa_embed_rerank_service.py +296 -0
- isa_model/deployment/modal/services/llm/__init__.py +1 -0
- isa_model/deployment/modal/services/llm/isa_llm_service.py +424 -0
- isa_model/deployment/modal/services/video/__init__.py +1 -0
- isa_model/deployment/modal/services/video/isa_video_hunyuan_service.py +423 -0
- isa_model/deployment/modal/services/vision/__init__.py +1 -0
- isa_model/deployment/modal/services/vision/isa_vision_ocr_service.py +519 -0
- isa_model/deployment/modal/services/vision/isa_vision_qwen25_service.py +709 -0
- isa_model/deployment/modal/services/vision/isa_vision_table_service.py +676 -0
- isa_model/deployment/modal/services/vision/isa_vision_ui_service.py +833 -0
- isa_model/deployment/modal/services/vision/isa_vision_ui_service_optimized.py +660 -0
- isa_model/deployment/models/org-org-acme-corp-tenant-a-service-llm-20250825-225822/tenant-a-service_modal_service.py +48 -0
- isa_model/deployment/models/org-test-org-123-prefix-test-service-llm-20250825-225822/prefix-test-service_modal_service.py +48 -0
- isa_model/deployment/models/test-llm-service-llm-20250825-204442/test-llm-service_modal_service.py +48 -0
- isa_model/deployment/models/test-monitoring-gpt2-llm-20250825-212906/test-monitoring-gpt2_modal_service.py +48 -0
- isa_model/deployment/models/test-monitoring-gpt2-llm-20250825-213009/test-monitoring-gpt2_modal_service.py +48 -0
- isa_model/deployment/storage/__init__.py +5 -0
- isa_model/deployment/storage/deployment_repository.py +824 -0
- isa_model/deployment/triton/__init__.py +10 -0
- isa_model/deployment/triton/config.py +196 -0
- isa_model/deployment/triton/configs/__init__.py +1 -0
- isa_model/deployment/triton/provider.py +512 -0
- isa_model/deployment/triton/scripts/__init__.py +1 -0
- isa_model/deployment/triton/templates/__init__.py +1 -0
- isa_model/inference/__init__.py +47 -1
- isa_model/inference/ai_factory.py +179 -16
- isa_model/inference/legacy_services/__init__.py +21 -0
- isa_model/inference/legacy_services/model_evaluation.py +637 -0
- isa_model/inference/legacy_services/model_service.py +573 -0
- isa_model/inference/legacy_services/model_serving.py +717 -0
- isa_model/inference/legacy_services/model_training.py +561 -0
- isa_model/inference/models/__init__.py +21 -0
- isa_model/inference/models/inference_config.py +551 -0
- isa_model/inference/models/inference_record.py +675 -0
- isa_model/inference/models/performance_models.py +714 -0
- isa_model/inference/repositories/__init__.py +9 -0
- isa_model/inference/repositories/inference_repository.py +828 -0
- isa_model/inference/services/audio/__init__.py +21 -0
- isa_model/inference/services/audio/base_realtime_service.py +225 -0
- isa_model/inference/services/audio/base_stt_service.py +184 -11
- isa_model/inference/services/audio/isa_tts_service.py +0 -0
- isa_model/inference/services/audio/openai_realtime_service.py +320 -124
- isa_model/inference/services/audio/openai_stt_service.py +53 -11
- isa_model/inference/services/base_service.py +17 -1
- isa_model/inference/services/custom_model_manager.py +277 -0
- isa_model/inference/services/embedding/__init__.py +13 -0
- isa_model/inference/services/embedding/base_embed_service.py +111 -8
- isa_model/inference/services/embedding/isa_embed_service.py +305 -0
- isa_model/inference/services/embedding/ollama_embed_service.py +15 -3
- isa_model/inference/services/embedding/openai_embed_service.py +2 -4
- isa_model/inference/services/embedding/resilient_embed_service.py +285 -0
- isa_model/inference/services/embedding/tests/test_embedding.py +222 -0
- isa_model/inference/services/img/__init__.py +2 -2
- isa_model/inference/services/img/base_image_gen_service.py +24 -7
- isa_model/inference/services/img/replicate_image_gen_service.py +84 -422
- isa_model/inference/services/img/services/replicate_face_swap.py +193 -0
- isa_model/inference/services/img/services/replicate_flux.py +226 -0
- isa_model/inference/services/img/services/replicate_flux_kontext.py +219 -0
- isa_model/inference/services/img/services/replicate_sticker_maker.py +249 -0
- isa_model/inference/services/img/tests/test_img_client.py +297 -0
- isa_model/inference/services/llm/__init__.py +10 -2
- isa_model/inference/services/llm/base_llm_service.py +361 -26
- isa_model/inference/services/llm/cerebras_llm_service.py +628 -0
- isa_model/inference/services/llm/helpers/llm_adapter.py +71 -12
- isa_model/inference/services/llm/helpers/llm_prompts.py +342 -0
- isa_model/inference/services/llm/helpers/llm_utils.py +321 -23
- isa_model/inference/services/llm/huggingface_llm_service.py +581 -0
- isa_model/inference/services/llm/local_llm_service.py +747 -0
- isa_model/inference/services/llm/ollama_llm_service.py +11 -3
- isa_model/inference/services/llm/openai_llm_service.py +670 -56
- isa_model/inference/services/llm/yyds_llm_service.py +10 -3
- isa_model/inference/services/vision/__init__.py +27 -6
- isa_model/inference/services/vision/base_vision_service.py +118 -185
- isa_model/inference/services/vision/blip_vision_service.py +359 -0
- isa_model/inference/services/vision/helpers/image_utils.py +19 -10
- isa_model/inference/services/vision/isa_vision_service.py +634 -0
- isa_model/inference/services/vision/openai_vision_service.py +19 -10
- isa_model/inference/services/vision/tests/test_ocr_client.py +284 -0
- isa_model/inference/services/vision/vgg16_vision_service.py +257 -0
- isa_model/serving/api/cache_manager.py +245 -0
- isa_model/serving/api/dependencies/__init__.py +1 -0
- isa_model/serving/api/dependencies/auth.py +194 -0
- isa_model/serving/api/dependencies/database.py +139 -0
- isa_model/serving/api/error_handlers.py +284 -0
- isa_model/serving/api/fastapi_server.py +240 -18
- isa_model/serving/api/middleware/auth.py +317 -0
- isa_model/serving/api/middleware/security.py +268 -0
- isa_model/serving/api/middleware/tenant_context.py +414 -0
- isa_model/serving/api/routes/analytics.py +489 -0
- isa_model/serving/api/routes/config.py +645 -0
- isa_model/serving/api/routes/deployment_billing.py +315 -0
- isa_model/serving/api/routes/deployments.py +475 -0
- isa_model/serving/api/routes/gpu_gateway.py +440 -0
- isa_model/serving/api/routes/health.py +32 -12
- isa_model/serving/api/routes/inference_monitoring.py +486 -0
- isa_model/serving/api/routes/local_deployments.py +448 -0
- isa_model/serving/api/routes/logs.py +430 -0
- isa_model/serving/api/routes/settings.py +582 -0
- isa_model/serving/api/routes/tenants.py +575 -0
- isa_model/serving/api/routes/unified.py +992 -171
- isa_model/serving/api/routes/webhooks.py +479 -0
- isa_model/serving/api/startup.py +318 -0
- isa_model/serving/modal_proxy_server.py +249 -0
- isa_model/utils/gpu_utils.py +311 -0
- {isa_model-0.3.91.dist-info → isa_model-0.4.3.dist-info}/METADATA +76 -22
- isa_model-0.4.3.dist-info/RECORD +193 -0
- isa_model/deployment/cloud/__init__.py +0 -9
- isa_model/deployment/cloud/modal/__init__.py +0 -10
- isa_model/deployment/cloud/modal/isa_vision_doc_service.py +0 -766
- isa_model/deployment/cloud/modal/isa_vision_table_service.py +0 -532
- isa_model/deployment/cloud/modal/isa_vision_ui_service.py +0 -406
- isa_model/deployment/cloud/modal/register_models.py +0 -321
- isa_model/deployment/core/deployment_config.py +0 -356
- isa_model/deployment/core/isa_deployment_service.py +0 -401
- isa_model/deployment/gpu_int8_ds8/app/server.py +0 -66
- isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +0 -43
- isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +0 -35
- isa_model/deployment/runtime/deployed_service.py +0 -338
- isa_model/deployment/services/__init__.py +0 -9
- isa_model/deployment/services/auto_deploy_vision_service.py +0 -538
- isa_model/deployment/services/model_service.py +0 -332
- isa_model/deployment/services/service_monitor.py +0 -356
- isa_model/deployment/services/service_registry.py +0 -527
- isa_model/eval/__init__.py +0 -92
- isa_model/eval/benchmarks.py +0 -469
- isa_model/eval/config/__init__.py +0 -10
- isa_model/eval/config/evaluation_config.py +0 -108
- isa_model/eval/evaluators/__init__.py +0 -18
- isa_model/eval/evaluators/base_evaluator.py +0 -503
- isa_model/eval/evaluators/llm_evaluator.py +0 -472
- isa_model/eval/factory.py +0 -531
- isa_model/eval/infrastructure/__init__.py +0 -24
- isa_model/eval/infrastructure/experiment_tracker.py +0 -466
- isa_model/eval/metrics.py +0 -798
- isa_model/inference/adapter/unified_api.py +0 -248
- isa_model/inference/services/helpers/stacked_config.py +0 -148
- isa_model/inference/services/img/flux_professional_service.py +0 -603
- isa_model/inference/services/img/helpers/base_stacked_service.py +0 -274
- isa_model/inference/services/others/table_transformer_service.py +0 -61
- isa_model/inference/services/vision/doc_analysis_service.py +0 -640
- isa_model/inference/services/vision/helpers/base_stacked_service.py +0 -274
- isa_model/inference/services/vision/ui_analysis_service.py +0 -823
- isa_model/scripts/inference_tracker.py +0 -283
- isa_model/scripts/mlflow_manager.py +0 -379
- isa_model/scripts/model_registry.py +0 -465
- isa_model/scripts/register_models.py +0 -370
- isa_model/scripts/register_models_with_embeddings.py +0 -510
- isa_model/scripts/start_mlflow.py +0 -95
- isa_model/scripts/training_tracker.py +0 -257
- isa_model/training/__init__.py +0 -74
- isa_model/training/annotation/annotation_schema.py +0 -47
- isa_model/training/annotation/processors/annotation_processor.py +0 -126
- isa_model/training/annotation/storage/dataset_manager.py +0 -131
- isa_model/training/annotation/storage/dataset_schema.py +0 -44
- isa_model/training/annotation/tests/test_annotation_flow.py +0 -109
- isa_model/training/annotation/tests/test_minio copy.py +0 -113
- isa_model/training/annotation/tests/test_minio_upload.py +0 -43
- isa_model/training/annotation/views/annotation_controller.py +0 -158
- isa_model/training/cloud/__init__.py +0 -22
- isa_model/training/cloud/job_orchestrator.py +0 -402
- isa_model/training/cloud/runpod_trainer.py +0 -454
- isa_model/training/cloud/storage_manager.py +0 -482
- isa_model/training/core/__init__.py +0 -23
- isa_model/training/core/config.py +0 -181
- isa_model/training/core/dataset.py +0 -222
- isa_model/training/core/trainer.py +0 -720
- isa_model/training/core/utils.py +0 -213
- isa_model/training/factory.py +0 -424
- isa_model-0.3.91.dist-info/RECORD +0 -138
- /isa_model/{core/storage/minio_storage.py → deployment/modal/services/audio/isa_audio_fish_service.py} +0 -0
- /isa_model/deployment/{services → modal/services/vision}/simple_auto_deploy_vision_service.py +0 -0
- {isa_model-0.3.91.dist-info → isa_model-0.4.3.dist-info}/WHEEL +0 -0
- {isa_model-0.3.91.dist-info → isa_model-0.4.3.dist-info}/top_level.txt +0 -0
@@ -2,56 +2,52 @@
 # -*- coding: utf-8 -*-
 
 """
-Replicate
-
+Replicate Image Generation Service (Orchestrator)
+Delegates to specialized services: FLUX, FLUX Kontext, Sticker Maker, Face Swap
 """
 
-import os
-import time
-import uuid
 import logging
-from typing import Dict, Any,
-import asyncio
-import aiohttp
-import replicate
-from PIL import Image
-from io import BytesIO
+from typing import Dict, Any, Optional, Union
 
 from .base_image_gen_service import BaseImageGenService
+from .services.replicate_flux import ReplicateFluxService
+from .services.replicate_flux_kontext import ReplicateFluxKontextService
+from .services.replicate_sticker_maker import ReplicateStickerMakerService
+from .services.replicate_face_swap import ReplicateFaceSwapService
 
 logger = logging.getLogger(__name__)
 
 class ReplicateImageGenService(BaseImageGenService):
     """
-    Replicate
-
-    - flux-
+    Replicate Image Generation Service (Orchestrator)
+    Delegates to specialized services based on model type:
+    - flux-schnell: Text-to-image generation
+    - flux-kontext-pro: Image-to-image generation
+    - sticker-maker: Sticker generation
+    - face-swap: Face swapping
     """
 
     def __init__(self, provider_name: str, model_name: str, **kwargs):
         super().__init__(provider_name, model_name, **kwargs)
 
-        #
-
+        # Initialize the appropriate specialized service
+        self._delegate_service = self._create_delegate_service()
 
-
-
-
-
-
-
-
-
-
-
-
-        self.
-
-
-
-        except Exception as e:
-            logger.error(f"Failed to initialize Replicate client: {e}")
-            raise ValueError(f"Failed to initialize Replicate client: {e}") from e
+        logger.info(f"Initialized ReplicateImageGenService orchestrator with model '{self.model_name}'")
+
+    def _create_delegate_service(self) -> BaseImageGenService:
+        """Create the appropriate specialized service based on model name"""
+        if "flux-schnell" in self.model_name:
+            return ReplicateFluxService(self.provider_name, self.model_name)
+        elif "flux-kontext-pro" in self.model_name:
+            return ReplicateFluxKontextService(self.provider_name, self.model_name)
+        elif "sticker-maker" in self.model_name:
+            return ReplicateStickerMakerService(self.provider_name, self.model_name)
+        elif "face-swap" in self.model_name:
+            return ReplicateFaceSwapService(self.provider_name, self.model_name)
+        else:
+            # Default to FLUX for unknown models
+            return ReplicateFluxService(self.provider_name, self.model_name)
 
     async def generate_image(
         self,
@@ -63,36 +59,14 @@ class ReplicateImageGenService(BaseImageGenService):
         guidance_scale: float = 7.5,
         seed: Optional[int] = None
     ) -> Dict[str, Any]:
-        """
-
-
-
-
-
-                "go_fast": True,
-                "megapixels": "1",
-                "num_outputs": 1,
-                "aspect_ratio": "1:1",
-                "output_format": "jpg",
-                "output_quality": 90,
-                "num_inference_steps": 4
-            }
+        """Generate single image - delegates to appropriate service"""
+        if hasattr(self._delegate_service, 'generate_image'):
+            return await self._delegate_service.generate_image(
+                prompt, negative_prompt, width, height,
+                num_inference_steps, guidance_scale, seed
+            )
         else:
-
-            input_data = {
-                "prompt": prompt,
-                "width": width,
-                "height": height,
-                "num_inference_steps": num_inference_steps,
-                "guidance_scale": guidance_scale
-            }
-
-        if negative_prompt:
-            input_data["negative_prompt"] = negative_prompt
-        if seed:
-            input_data["seed"] = seed
-
-        return await self._generate_internal(input_data)
+            raise NotImplementedError(f"generate_image not supported by {type(self._delegate_service).__name__}")
 
     async def image_to_image(
         self,
@@ -104,383 +78,71 @@ class ReplicateImageGenService(BaseImageGenService):
         guidance_scale: float = 7.5,
         seed: Optional[int] = None
     ) -> Dict[str, Any]:
-        """
-
-
-
-
-
-                "input_image": init_image,
-                "aspect_ratio": "match_input_image",
-                "output_format": "jpg",
-                "safety_tolerance": 2
-            }
-        else:
-            # Default parameters
-            input_data = {
-                "prompt": prompt,
-                "image": init_image,
-                "strength": strength,
-                "num_inference_steps": num_inference_steps,
-                "guidance_scale": guidance_scale
-            }
-
-        if negative_prompt:
-            input_data["negative_prompt"] = negative_prompt
-        if seed:
-            input_data["seed"] = seed
-
-        return await self._generate_internal(input_data)
-
-    async def instant_id_generation(
-        self,
-        prompt: str,
-        face_image: Union[str, Any],
-        negative_prompt: Optional[str] = None,
-        num_inference_steps: int = 30,
-        guidance_scale: float = 5.0,
-        seed: Optional[int] = None,
-        identitynet_strength_ratio: float = 0.8,
-        adapter_strength_ratio: float = 0.8
-    ) -> Dict[str, Any]:
-        """InstantID face-consistency generation"""
-
-        if "instant-id" in self.model_name:
-            input_data = {
-                "prompt": prompt,
-                "image": face_image,
-                "guidance_scale": guidance_scale,
-                "num_inference_steps": num_inference_steps,
-                "identitynet_strength_ratio": identitynet_strength_ratio,
-                "adapter_strength_ratio": adapter_strength_ratio
-            }
-
-            if negative_prompt:
-                input_data["negative_prompt"] = negative_prompt
-            if seed:
-                input_data["seed"] = seed
-        else:
-            # Default InstantID parameters
-            input_data = {
-                "prompt": prompt,
-                "face_image": face_image,
-                "negative_prompt": negative_prompt or "",
-                "num_inference_steps": num_inference_steps,
-                "guidance_scale": guidance_scale,
-                "identitynet_strength_ratio": identitynet_strength_ratio,
-                "adapter_strength_ratio": adapter_strength_ratio
-            }
-
-            if seed:
-                input_data["seed"] = seed
-
-        return await self._generate_internal(input_data)
-
-    async def consistent_character_generation(
-        self,
-        subject: Union[str, Any],
-        prompt: Optional[str] = None,
-        negative_prompt: Optional[str] = None,
-        number_of_images: int = 4,
-        disable_safety_checker: bool = False
-    ) -> Dict[str, Any]:
-        """Consistent character generation - produce the same character in multiple poses and expressions"""
-
-        if "consistent-character" in self.model_name:
-            input_data = {
-                "subject": subject,
-                "number_of_images": number_of_images,
-                "disable_safety_checker": disable_safety_checker
-            }
-
-            if prompt:
-                input_data["prompt"] = prompt
-            if negative_prompt:
-                input_data["negative_prompt"] = negative_prompt
+        """Image-to-image generation - delegates to appropriate service"""
+        if hasattr(self._delegate_service, 'image_to_image'):
+            return await self._delegate_service.image_to_image(
+                prompt, init_image, strength, negative_prompt,
+                num_inference_steps, guidance_scale, seed
+            )
         else:
-
-            input_data = {
-                "subject_image": subject,
-                "prompt": prompt or "portrait, different poses and expressions",
-                "negative_prompt": negative_prompt or "low quality, blurry",
-                "num_images": number_of_images
-            }
-
-        return await self._generate_internal(input_data)
+            raise NotImplementedError(f"image_to_image not supported by {type(self._delegate_service).__name__}")
 
-    async def
+    async def generate_sticker(
         self,
         prompt: str,
-
-        num_outputs: int = 1,
-        aspect_ratio: str = "1:1",
-        output_format: str = "jpg",
-        guidance_scale: float = 3.5,
-        output_quality: int = 90,
-        num_inference_steps: int = 28,
-        disable_safety_checker: bool = False
+        **kwargs
     ) -> Dict[str, Any]:
-        """
-
-
-            input_data = {
-                "prompt": prompt,
-                "lora_scale": lora_scale,
-                "num_outputs": num_outputs,
-                "aspect_ratio": aspect_ratio,
-                "output_format": output_format,
-                "guidance_scale": guidance_scale,
-                "output_quality": output_quality,
-                "num_inference_steps": num_inference_steps,
-                "disable_safety_checker": disable_safety_checker
-            }
+        """Generate sticker - delegates to sticker maker service"""
+        if hasattr(self._delegate_service, 'generate_sticker'):
+            return await self._delegate_service.generate_sticker(prompt, **kwargs)
         else:
-
-            input_data = {
-                "prompt": prompt,
-                "lora_strength": lora_scale,
-                "num_images": num_outputs,
-                "guidance_scale": guidance_scale,
-                "num_inference_steps": num_inference_steps
-            }
-
-        return await self._generate_internal(input_data)
+            raise NotImplementedError(f"generate_sticker not supported by {type(self._delegate_service).__name__}")
 
-    async def
+    async def face_swap(
         self,
-
-
-
-        num_inference_steps: int = 20,
-        guidance_scale: float = 10.0,
-        strength: float = 0.55,
-        hdr: float = 0.0,
-        seed: Optional[int] = None
+        swap_image: Union[str, Any],
+        target_image: Union[str, Any],
+        **kwargs
     ) -> Dict[str, Any]:
-        """
-
-
-            input_data = {
-                "image": image,
-                "scale": scale,
-                "scheduler": scheduler,
-                "num_inference_steps": num_inference_steps,
-                "guidance_scale": guidance_scale,
-                "strength": strength,
-                "hdr": hdr
-            }
-
-            if seed:
-                input_data["seed"] = seed
+        """Face swap - delegates to face swap service"""
+        if hasattr(self._delegate_service, 'face_swap'):
+            return await self._delegate_service.face_swap(swap_image, target_image, **kwargs)
         else:
-
-            input_data = {
-                "image": image,
-                "upscale_factor": scale,
-                "num_inference_steps": num_inference_steps,
-                "guidance_scale": guidance_scale,
-                "denoising_strength": strength
-            }
-
-            if seed:
-                input_data["seed"] = seed
-
-        return await self._generate_internal(input_data)
-
-    async def _generate_internal(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Internal generation method"""
-        try:
-            logger.info(f"Starting image generation with model {self.model_name}")
-
-            # Call the Replicate API
-            output = await replicate.async_run(self.model_name, input=input_data)
-
-            # Process output - convert FileOutput objects to URL strings
-            if isinstance(output, list):
-                raw_urls = output
-            else:
-                raw_urls = [output]
-
-            # Convert to string URLs
-            urls = []
-            for url in raw_urls:
-                if hasattr(url, 'url'):
-                    urls.append(str(url.url))  # type: ignore
-                else:
-                    urls.append(str(url))
-
-            # Update statistics
-            self.last_generation_count = len(urls)
-            self.total_generation_count += len(urls)
-
-            # Calculate cost
-            cost = self._calculate_cost(len(urls))
-
-            # Track billing information
-            await self._track_usage(
-                service_type="image_generation",
-                operation="image_generation",
-                input_tokens=0,
-                output_tokens=0,
-                input_units=1,  # Input prompt
-                output_units=len(urls),  # Generated images count
-                metadata={
-                    "model": self.model_name,
-                    "prompt": input_data.get("prompt", "")[:100],  # Truncate to 100 chars
-                    "generation_type": "t2i" if "flux-schnell" in self.model_name else "i2i",
-                    "image_count": len(urls),
-                    "cost_usd": cost
-                }
-            )
-
-            # Return URLs instead of binary data for HTTP API compatibility
-            result = {
-                "urls": urls,  # Image URLs - primary response
-                "url": urls[0] if urls else None,  # First URL for convenience
-                "format": "jpg",  # Default format
-                "width": input_data.get("width", 1024),
-                "height": input_data.get("height", 1024),
-                "seed": input_data.get("seed"),
-                "count": len(urls),
-                "cost_usd": cost,
-                "metadata": {
-                    "model": self.model_name,
-                    "input": input_data,
-                    "generation_count": len(urls)
-                }
-            }
-
-            logger.info(f"Image generation complete: {len(urls)} images, cost: ${cost:.6f}")
-            return result
-
-        except Exception as e:
-            logger.error(f"Image generation failed: {e}")
-            raise
-
-    def _calculate_cost(self, image_count: int) -> float:
-        """Calculate generation cost"""
-        from isa_model.core.models.model_manager import ModelManager
-
-        manager = ModelManager()
-
-        if "flux-schnell" in self.model_name:
-            # $3 per 1000 images
-            return (image_count / 1000) * 3.0
-        elif "flux-kontext-pro" in self.model_name:
-            # $0.04 per image
-            return image_count * 0.04
-        else:
-            # Use ModelManager pricing
-            pricing = manager.get_model_pricing("replicate", self.model_name)
-            return (image_count / 1000) * pricing.get("input", 0.0)
-
-    async def generate_images(
-        self,
-        prompt: str,
-        num_images: int = 1,
-        negative_prompt: Optional[str] = None,
-        width: int = 512,
-        height: int = 512,
-        num_inference_steps: int = 4,
-        guidance_scale: float = 7.5,
-        seed: Optional[int] = None
-    ) -> List[Dict[str, Any]]:
-        """Generate multiple images"""
-        results = []
-        for i in range(num_images):
-            current_seed = seed + i if seed else None
-            result = await self.generate_image(
-                prompt, negative_prompt, width, height,
-                num_inference_steps, guidance_scale, current_seed
-            )
-            results.append(result)
-        return results
-
-    async def _download_image(self, url: str, save_path: str) -> None:
-        """Download and save an image"""
-        try:
-            async with aiohttp.ClientSession() as session:
-                async with session.get(url) as response:
-                    response.raise_for_status()
-                    content = await response.read()
-                    with Image.open(BytesIO(content)) as img:
-                        img.save(save_path)
-        except Exception as e:
-            logger.error(f"Error downloading image: {url}, {e}")
-            raise
+            raise NotImplementedError(f"face_swap not supported by {type(self._delegate_service).__name__}")
 
+    # Delegation methods for common functionality
     def get_generation_stats(self) -> Dict[str, Any]:
-        """
-
-        if "flux-schnell" in self.model_name:
-            total_cost = (self.total_generation_count / 1000) * 3.0
-        elif "flux-kontext-pro" in self.model_name:
-            total_cost = self.total_generation_count * 0.04
-
-        return {
-            "last_generation_count": self.last_generation_count,
-            "total_generation_count": self.total_generation_count,
-            "total_cost_usd": total_cost,
-            "model": self.model_name
-        }
-
-    def get_supported_sizes(self) -> List[Dict[str, int]]:
-        """Get supported image sizes"""
-        if "flux" in self.model_name:
-            return [
-                {"width": 512, "height": 512},
-                {"width": 768, "height": 768},
-                {"width": 1024, "height": 1024},
-            ]
-        else:
-            return [
-                {"width": 512, "height": 512},
-                {"width": 768, "height": 768},
-                {"width": 1024, "height": 1024},
-                {"width": 768, "height": 1344},
-                {"width": 1344, "height": 768},
-            ]
+        """Get generation statistics - delegates to service"""
+        return self._delegate_service.get_generation_stats()
 
     def get_model_info(self) -> Dict[str, Any]:
-        """
-
-            return {
-                "name": self.model_name,
-                "type": "t2i",
-                "cost_per_1000_images": 3.0,
-                "supports_negative_prompt": False,
-                "supports_img2img": False,
-                "max_steps": 4
-            }
-        elif "flux-kontext-pro" in self.model_name:
-            return {
-                "name": self.model_name,
-                "type": "i2i",
-                "cost_per_image": 0.04,
-                "supports_negative_prompt": False,
-                "supports_img2img": True,
-                "max_width": 1024,
-                "max_height": 1024
-            }
-        else:
-            return {
-                "name": self.model_name,
-                "type": "general",
-                "supports_negative_prompt": True,
-                "supports_img2img": True
-            }
+        """Get model information - delegates to service"""
+        return self._delegate_service.get_model_info()
 
     async def load(self) -> None:
-        """
-
-            raise ValueError("Missing Replicate API token")
-        logger.info(f"Replicate image generation service is ready, using model: {self.model_name}")
+        """Load service - delegates to service"""
+        await self._delegate_service.load()
 
     async def unload(self) -> None:
-        """
-
+        """Unload service - delegates to service"""
+        await self._delegate_service.unload()
 
     async def close(self):
-        """
-        await self.
+        """Close service - delegates to service"""
+        await self._delegate_service.close()
+
+    # Abstract method implementations for delegation
+    async def generate_images(self, prompt: str, num_images: int = 1, negative_prompt=None, width: int = 512, height: int = 512, num_inference_steps: int = 20, guidance_scale: float = 7.5, seed=None) -> list[Dict[str, Any]]:
+        """Generate multiple images - delegates to service"""
+        if hasattr(self._delegate_service, 'generate_images'):
+            return await self._delegate_service.generate_images(prompt, num_images, negative_prompt, width, height, num_inference_steps, guidance_scale, seed)
+        else:
+            raise NotImplementedError(f"generate_images not supported by {type(self._delegate_service).__name__}")
+
+    def get_supported_sizes(self) -> list[Dict[str, int]]:
+        """Get supported sizes - delegates to service"""
+        if hasattr(self._delegate_service, 'get_supported_sizes'):
+            return self._delegate_service.get_supported_sizes()
+        else:
+            return [{"width": 512, "height": 512}, {"width": 1024, "height": 1024}]
 
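
For orientation, the rewritten `replicate_image_gen_service.py` keeps the previous public surface (`generate_image`, `image_to_image`, `load`, `close`, ...) but routes every call to a model-specific delegate chosen in `_create_delegate_service()`. The sketch below is a minimal usage example, not part of the package: the model string, the presence of a `REPLICATE_API_TOKEN` environment variable, and the shape of the returned dictionary are assumptions; only the constructor and the positional argument order of `generate_image` come from the diff above.

```python
import asyncio

from isa_model.inference.services.img.replicate_image_gen_service import ReplicateImageGenService


async def main() -> None:
    # A model name containing "flux-schnell" is routed to ReplicateFluxService by
    # _create_delegate_service(); other substrings select the other delegates.
    service = ReplicateImageGenService("replicate", "black-forest-labs/flux-schnell")
    await service.load()
    try:
        # Positional order mirrors the delegation call in generate_image():
        # prompt, negative_prompt, width, height, num_inference_steps, guidance_scale, seed
        result = await service.generate_image(
            "a lighthouse at dawn, watercolor", None, 1024, 1024, 4, 7.5, None
        )
        print(result)  # exact keys depend on the delegate service
    finally:
        await service.close()


if __name__ == "__main__":
    asyncio.run(main())
```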