isa-model 0.3.5__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isa_model/__init__.py +30 -1
- isa_model/client.py +937 -0
- isa_model/core/config/__init__.py +16 -0
- isa_model/core/config/config_manager.py +514 -0
- isa_model/core/config.py +426 -0
- isa_model/core/models/model_billing_tracker.py +476 -0
- isa_model/core/models/model_manager.py +399 -0
- isa_model/core/{storage/supabase_storage.py → models/model_repo.py} +72 -73
- isa_model/core/pricing_manager.py +426 -0
- isa_model/core/services/__init__.py +19 -0
- isa_model/core/services/intelligent_model_selector.py +547 -0
- isa_model/core/types.py +291 -0
- isa_model/deployment/__init__.py +2 -0
- isa_model/deployment/cloud/modal/isa_vision_doc_service.py +157 -3
- isa_model/deployment/cloud/modal/isa_vision_table_service.py +532 -0
- isa_model/deployment/cloud/modal/isa_vision_ui_service.py +104 -3
- isa_model/deployment/cloud/modal/register_models.py +321 -0
- isa_model/deployment/runtime/deployed_service.py +338 -0
- isa_model/deployment/services/__init__.py +9 -0
- isa_model/deployment/services/auto_deploy_vision_service.py +538 -0
- isa_model/deployment/services/model_service.py +332 -0
- isa_model/deployment/services/service_monitor.py +356 -0
- isa_model/deployment/services/service_registry.py +527 -0
- isa_model/deployment/services/simple_auto_deploy_vision_service.py +275 -0
- isa_model/eval/__init__.py +80 -44
- isa_model/eval/config/__init__.py +10 -0
- isa_model/eval/config/evaluation_config.py +108 -0
- isa_model/eval/evaluators/__init__.py +18 -0
- isa_model/eval/evaluators/base_evaluator.py +503 -0
- isa_model/eval/evaluators/llm_evaluator.py +472 -0
- isa_model/eval/factory.py +417 -709
- isa_model/eval/infrastructure/__init__.py +24 -0
- isa_model/eval/infrastructure/experiment_tracker.py +466 -0
- isa_model/eval/metrics.py +191 -21
- isa_model/inference/ai_factory.py +257 -601
- isa_model/inference/services/audio/base_stt_service.py +65 -1
- isa_model/inference/services/audio/base_tts_service.py +75 -1
- isa_model/inference/services/audio/openai_stt_service.py +189 -151
- isa_model/inference/services/audio/openai_tts_service.py +12 -10
- isa_model/inference/services/audio/replicate_tts_service.py +61 -56
- isa_model/inference/services/base_service.py +55 -17
- isa_model/inference/services/embedding/base_embed_service.py +65 -1
- isa_model/inference/services/embedding/ollama_embed_service.py +103 -43
- isa_model/inference/services/embedding/openai_embed_service.py +8 -10
- isa_model/inference/services/helpers/stacked_config.py +148 -0
- isa_model/inference/services/img/__init__.py +18 -0
- isa_model/inference/services/{vision → img}/base_image_gen_service.py +80 -1
- isa_model/inference/services/{stacked → img}/flux_professional_service.py +25 -1
- isa_model/inference/services/{stacked → img/helpers}/base_stacked_service.py +40 -35
- isa_model/inference/services/{vision → img}/replicate_image_gen_service.py +44 -31
- isa_model/inference/services/llm/__init__.py +3 -3
- isa_model/inference/services/llm/base_llm_service.py +492 -40
- isa_model/inference/services/llm/helpers/llm_prompts.py +258 -0
- isa_model/inference/services/llm/helpers/llm_utils.py +280 -0
- isa_model/inference/services/llm/ollama_llm_service.py +51 -17
- isa_model/inference/services/llm/openai_llm_service.py +70 -19
- isa_model/inference/services/llm/yyds_llm_service.py +24 -23
- isa_model/inference/services/vision/__init__.py +38 -4
- isa_model/inference/services/vision/base_vision_service.py +218 -117
- isa_model/inference/services/vision/{isA_vision_service.py → disabled/isA_vision_service.py} +98 -0
- isa_model/inference/services/{stacked → vision}/doc_analysis_service.py +1 -1
- isa_model/inference/services/vision/helpers/base_stacked_service.py +274 -0
- isa_model/inference/services/vision/helpers/image_utils.py +272 -3
- isa_model/inference/services/vision/helpers/vision_prompts.py +297 -0
- isa_model/inference/services/vision/openai_vision_service.py +104 -307
- isa_model/inference/services/vision/replicate_vision_service.py +140 -325
- isa_model/inference/services/{stacked → vision}/ui_analysis_service.py +2 -498
- isa_model/scripts/register_models.py +370 -0
- isa_model/scripts/register_models_with_embeddings.py +510 -0
- isa_model/serving/api/fastapi_server.py +6 -1
- isa_model/serving/api/routes/unified.py +274 -0
- {isa_model-0.3.5.dist-info → isa_model-0.3.7.dist-info}/METADATA +4 -1
- {isa_model-0.3.5.dist-info → isa_model-0.3.7.dist-info}/RECORD +78 -53
- isa_model/config/__init__.py +0 -9
- isa_model/config/config_manager.py +0 -213
- isa_model/core/model_manager.py +0 -213
- isa_model/core/model_registry.py +0 -375
- isa_model/core/vision_models_init.py +0 -116
- isa_model/inference/billing_tracker.py +0 -406
- isa_model/inference/services/llm/triton_llm_service.py +0 -481
- isa_model/inference/services/stacked/__init__.py +0 -26
- isa_model/inference/services/stacked/config.py +0 -426
- isa_model/inference/services/vision/ollama_vision_service.py +0 -194
- /isa_model/core/{model_storage.py → models/model_storage.py} +0 -0
- /isa_model/inference/services/{vision → embedding}/helpers/text_splitter.py +0 -0
- /isa_model/inference/services/llm/{llm_adapter.py → helpers/llm_adapter.py} +0 -0
- {isa_model-0.3.5.dist-info → isa_model-0.3.7.dist-info}/WHEEL +0 -0
- {isa_model-0.3.5.dist-info → isa_model-0.3.7.dist-info}/top_level.txt +0 -0
isa_model/scripts/register_models_with_embeddings.py
@@ -0,0 +1,510 @@
+#!/usr/bin/env python3
+"""
+Enhanced model registration script.
+Reads model definitions from YAML configuration files, registers them in the Supabase database, and generates vector embeddings.
+
+Features:
+- Registers models into the Supabase models and model_capabilities tables
+- Automatically generates vector embeddings of model descriptions and stores them in the model_embedding table
+- Supports batch registration, updates, and validation
+- Supports the new omni model type
+
+Usage:
+    python register_models_with_embeddings.py --all               # Register models for all providers
+    python register_models_with_embeddings.py --provider openai   # Register only OpenAI models
+    python register_models_with_embeddings.py --dry-run           # Validate configuration only, do not register
+    python register_models_with_embeddings.py --update            # Update existing models
+    python register_models_with_embeddings.py --embeddings-only   # Generate embeddings only, do not register models
+"""
+
+import argparse
+import asyncio
+import logging
+import sys
+import json
+from pathlib import Path
+from typing import Dict, List, Any, Optional
+import yaml
+
+# Add the project root to the Python path
+project_root = Path(__file__).parent.parent
+sys.path.insert(0, str(project_root))
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+try:
+    from isa_model.core.storage.supabase_storage import SupabaseModelRegistry
+    from isa_model.inference.ai_factory import AIFactory
+    SUPABASE_AVAILABLE = True
+except ImportError as e:
+    logger.error(f"Required dependencies not available: {e}")
+    SUPABASE_AVAILABLE = False
+
+
+class EnhancedModelRegistrationScript:
+    """Enhanced model registration script with Supabase and vector embedding support"""
+
+    def __init__(self):
+        if not SUPABASE_AVAILABLE:
+            raise ImportError("Supabase and other required dependencies not available")
+
+        self.registry = SupabaseModelRegistry()
+        self.ai_factory = AIFactory.get_instance()
+        self.embedding_service = None
+        self.config_dir = project_root / "config" / "models"
+
+        # Supported model types (including the new omni type)
+        self.supported_model_types = {
+            "vision", "image", "audio", "text", "embedding", "omni"
+        }
+
+        # Supported capabilities
+        self.supported_capabilities = {
+            "text_generation", "chat", "reasoning", "code_generation",
+            "image_analysis", "image_understanding", "ocr", "ui_detection",
+            "table_detection", "image_generation", "style_transfer",
+            "text_to_speech", "speech_to_text", "audio_transcription",
+            "text_embedding", "image_enhancement"
+        }
+
+    async def initialize_embedding_service(self):
+        """Initialize the embedding service"""
+        try:
+            self.embedding_service = self.ai_factory.get_embed("text-embedding-3-small", "openai")
+            logger.info("Embedding service initialized")
+        except Exception as e:
+            logger.warning(f"Failed to initialize embedding service: {e}")
+            self.embedding_service = None
+
+    def load_provider_config(self, provider: str) -> Dict[str, Any]:
+        """Load the provider's YAML configuration"""
+        config_file = self.config_dir / f"{provider}_models.yaml"
+
+        if not config_file.exists():
+            raise FileNotFoundError(f"Configuration file not found: {config_file}")
+
+        try:
+            with open(config_file, 'r', encoding='utf-8') as f:
+                config = yaml.safe_load(f)
+
+            logger.info(f"Loaded configuration for provider '{provider}' with {len(config.get('models', []))} models")
+            return config
+
+        except yaml.YAMLError as e:
+            raise ValueError(f"Invalid YAML in {config_file}: {e}")
+
+    def validate_model_config(self, model_config: Dict[str, Any], provider: str) -> bool:
+        """Validate a single model configuration"""
+        required_fields = ["model_id", "model_type", "capabilities", "metadata"]
+
+        for field in required_fields:
+            if field not in model_config:
+                logger.error(f"Missing required field '{field}' in model config: {model_config}")
+                return False
+
+        # Validate model_type
+        model_type = model_config["model_type"]
+        if model_type not in self.supported_model_types:
+            logger.error(f"Invalid model_type '{model_type}' for model {model_config['model_id']}")
+            logger.info(f"Supported types: {', '.join(self.supported_model_types)}")
+            return False
+
+        # Validate capabilities
+        capabilities = model_config["capabilities"]
+        if not isinstance(capabilities, list) or not capabilities:
+            logger.error(f"Capabilities must be a non-empty list for model {model_config['model_id']}")
+            return False
+
+        for cap in capabilities:
+            if cap not in self.supported_capabilities:
+                logger.warning(f"Unknown capability '{cap}' for model {model_config['model_id']}")
+                # Do not fail, just warn
+
+        # Validate metadata
+        metadata = model_config["metadata"]
+        required_metadata = ["description", "performance_tier", "provider_model_name"]
+
+        for field in required_metadata:
+            if field not in metadata:
+                logger.error(f"Missing required metadata field '{field}' for model {model_config['model_id']}")
+                return False
+
+        return True
+
+    def validate_provider_config(self, config: Dict[str, Any], provider: str) -> bool:
+        """Validate the provider's full configuration"""
+        if "provider" not in config:
+            logger.error("Missing 'provider' field in config")
+            return False
+
+        if config["provider"] != provider:
+            logger.error(f"Provider mismatch: expected '{provider}', got '{config['provider']}'")
+            return False
+
+        if "models" not in config:
+            logger.error("Missing 'models' field in config")
+            return False
+
+        models = config["models"]
+        if not isinstance(models, list) or not models:
+            logger.error("Models must be a non-empty list")
+            return False
+
+        # Validate each model
+        valid_count = 0
+        for i, model_config in enumerate(models):
+            if self.validate_model_config(model_config, provider):
+                valid_count += 1
+            else:
+                logger.error(f"Invalid configuration for model #{i+1}")
+
+        if valid_count != len(models):
+            logger.error(f"Only {valid_count}/{len(models)} models have valid configuration")
+            return False
+
+        logger.info(f"All {len(models)} models in {provider} configuration are valid")
+        return True
+
+    def create_search_text(self, model_config: Dict[str, Any]) -> str:
+        """Build the search text used for the embedding"""
+        metadata = model_config.get("metadata", {})
+
+        # Combine description, capabilities, and specialized tasks
+        parts = []
+
+        # Add the description
+        description = metadata.get("description", "")
+        if description:
+            parts.append(description)
+
+        # Add the capabilities
+        capabilities = model_config.get("capabilities", [])
+        if capabilities:
+            parts.append(f"Capabilities: {', '.join(capabilities)}")
+
+        # Add the specialized tasks
+        specialized_tasks = metadata.get("specialized_tasks", [])
+        if specialized_tasks:
+            parts.append(f"Tasks: {', '.join(specialized_tasks)}")
+
+        # Add the performance tier
+        performance_tier = metadata.get("performance_tier", "")
+        if performance_tier:
+            parts.append(f"Performance: {performance_tier}")
+
+        return " ".join(parts)
+
+    async def create_embedding(self, search_text: str) -> Optional[List[float]]:
+        """Create a vector embedding for the text"""
+        if not self.embedding_service:
+            logger.warning("Embedding service not available")
+            return None
+
+        try:
+            embedding = await self.embedding_service.create_text_embedding(search_text)
+            return embedding
+        except Exception as e:
+            logger.error(f"Failed to create embedding: {e}")
+            return None
+
+    async def register_model(self, model_config: Dict[str, Any], provider: str, update: bool = False, embeddings_only: bool = False) -> bool:
+        """Register a single model in Supabase"""
+        try:
+            model_id = model_config["model_id"]
+            model_type = model_config["model_type"]
+            capabilities = model_config["capabilities"]
+            metadata = model_config["metadata"].copy()
+            metadata["provider"] = provider
+
+            # Check whether the model already exists
+            existing_model = self.registry.get_model_info(model_id)
+
+            if existing_model and not update and not embeddings_only:
+                logger.warning(f"Model {model_id} already exists. Use --update to overwrite.")
+                return False
+
+            success = True
+
+            # Register the model in the base tables (unless running in embeddings-only mode)
+            if not embeddings_only:
+                if existing_model and update:
+                    logger.info(f"Updating existing model {model_id}")
+                else:
+                    logger.info(f"Registering new model {model_id}")
+
+                reg_success = self.registry.register_model(
+                    model_id=model_id,
+                    model_type=model_type,
+                    capabilities=capabilities,
+                    metadata=metadata
+                )
+
+                if not reg_success:
+                    logger.error(f"❌ Failed to register model {model_id} to basic tables")
+                    success = False
+
+            # Generate and store the embedding
+            if self.embedding_service:
+                try:
+                    search_text = self.create_search_text(model_config)
+                    embedding = await self.create_embedding(search_text)
+
+                    if embedding:
+                        # Store it in the model_embedding table
+                        embed_success = await self._store_model_embedding(
+                            model_id, provider, model_type,
+                            metadata.get("description", ""),
+                            search_text, metadata, embedding
+                        )
+
+                        if embed_success:
+                            logger.info(f"✅ Created embedding for {model_id}")
+                        else:
+                            logger.warning(f"⚠️ Failed to store embedding for {model_id}")
+                    else:
+                        logger.warning(f"⚠️ Failed to create embedding for {model_id}")
+
+                except Exception as e:
+                    logger.error(f"❌ Error creating embedding for {model_id}: {e}")
+
+            if success:
+                action = "embedding generated" if embeddings_only else "registered"
+                logger.info(f"✅ Successfully {action} for {model_id}")
+
+            return success
+
+        except Exception as e:
+            logger.error(f"❌ Error processing model {model_config.get('model_id', 'unknown')}: {e}")
+            return False
+
+    async def _store_model_embedding(
+        self,
+        model_id: str,
+        provider: str,
+        model_type: str,
+        description: str,
+        search_text: str,
+        metadata: Dict[str, Any],
+        embedding: List[float]
+    ) -> bool:
+        """Store the model embedding in the database"""
+        try:
+            # Direct access to the Supabase client is needed here
+            supabase = self.registry.supabase
+
+            embedding_data = {
+                'model_id': model_id,
+                'provider': provider,
+                'model_type': model_type,
+                'description': description,
+                'search_text': search_text,
+                'metadata': json.dumps(metadata),
+                'embedding': embedding
+            }
+
+            # Insert or update
+            result = supabase.table('model_embedding').upsert(
+                embedding_data,
+                on_conflict='model_id'
+            ).execute()
+
+            return bool(result.data)
+
+        except Exception as e:
+            logger.error(f"Failed to store embedding for {model_id}: {e}")
+            return False
+
+    async def register_provider_models(self, provider: str, dry_run: bool = False, update: bool = False, embeddings_only: bool = False) -> bool:
+        """Register all models for a provider"""
+        try:
+            # Load the configuration
+            config = self.load_provider_config(provider)
+
+            # Validate the configuration
+            if not self.validate_provider_config(config, provider):
+                logger.error(f"❌ Invalid configuration for provider {provider}")
+                return False
+
+            if dry_run:
+                logger.info(f"✅ Dry run successful for provider {provider} - configuration is valid")
+                return True
+
+            # Initialize the embedding service
+            if not embeddings_only:  # registration mode: embeddings are optional
+                await self.initialize_embedding_service()
+            else:  # embeddings-only mode: the embedding service is required
+                await self.initialize_embedding_service()
+                if not self.embedding_service:
+                    logger.error("❌ Embedding service required for embeddings-only mode")
+                    return False
+
+            # Register all models
+            models = config["models"]
+            success_count = 0
+
+            action = "generating embeddings for" if embeddings_only else "registering"
+            logger.info(f"🚀 Starting {action} {len(models)} models for provider {provider}")
+
+            for model_config in models:
+                if await self.register_model(model_config, provider, update, embeddings_only):
+                    success_count += 1
+
+            # Summarize the results
+            if success_count == len(models):
+                logger.info(f"🎉 All {len(models)} models processed successfully for provider {provider}")
+            else:
+                logger.warning(f"⚠️ Only {success_count}/{len(models)} models processed successfully for provider {provider}")
+
+            return success_count == len(models)
+
+        except Exception as e:
+            logger.error(f"❌ Failed to process models for provider {provider}: {e}")
+            return False
+
+    async def register_all_providers(self, dry_run: bool = False, update: bool = False, embeddings_only: bool = False) -> bool:
+        """Register models for all providers"""
+        providers = ["openai", "replicate", "yyds", "ollama"]
+
+        action = "embedding generation" if embeddings_only else "registration"
+        logger.info(f"🚀 Starting {action} for all providers: {', '.join(providers)}")
+
+        overall_success = True
+        results = {}
+
+        for provider in providers:
+            logger.info(f"\n📁 Processing provider: {provider}")
+            try:
+                success = await self.register_provider_models(provider, dry_run, update, embeddings_only)
+                results[provider] = success
+                if not success:
+                    overall_success = False
+            except FileNotFoundError:
+                logger.warning(f"⚠️ Configuration file not found for provider {provider}, skipping")
+                results[provider] = None
+            except Exception as e:
+                logger.error(f"❌ Unexpected error with provider {provider}: {e}")
+                results[provider] = False
+                overall_success = False
+
+        # Print the summary report
+        logger.info("\n" + "="*60)
+        logger.info("📊 PROCESSING SUMMARY")
+        logger.info("="*60)
+
+        for provider, success in results.items():
+            if success is True:
+                logger.info(f"✅ {provider}: SUCCESS")
+            elif success is False:
+                logger.error(f"❌ {provider}: FAILED")
+            else:
+                logger.warning(f"⚠️ {provider}: SKIPPED (no config)")
+
+        if overall_success:
+            action = "embedding generation" if embeddings_only else "model registration"
+            logger.info(f"\n🎉 All {action} completed successfully!")
+        else:
+            logger.error("\n❌ Some operations failed. Check logs for details.")
+
+        return overall_success
+
+    def list_available_providers(self) -> List[str]:
+        """List the available provider configuration files"""
+        providers = []
+        for config_file in self.config_dir.glob("*_models.yaml"):
+            provider = config_file.stem.replace("_models", "")
+            providers.append(provider)
+        return sorted(providers)
+
+
+async def main():
+    """Main entry point"""
+    parser = argparse.ArgumentParser(
+        description="Register models from YAML configuration files to Supabase with vector embeddings",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  python register_models_with_embeddings.py --all                        # Register all providers
+  python register_models_with_embeddings.py --provider openai            # Register only OpenAI models
+  python register_models_with_embeddings.py --dry-run --all              # Validate all configs
+  python register_models_with_embeddings.py --update --provider openai   # Update existing models
+  python register_models_with_embeddings.py --embeddings-only --all      # Only generate embeddings
+        """
+    )
+
+    parser.add_argument("--all", action="store_true",
+                        help="Process models from all provider config files")
+    parser.add_argument("--provider", type=str,
+                        help="Process models from specific provider (e.g., openai, replicate)")
+    parser.add_argument("--dry-run", action="store_true",
+                        help="Validate configuration without actually processing models")
+    parser.add_argument("--update", action="store_true",
+                        help="Update existing models if they already exist")
+    parser.add_argument("--embeddings-only", action="store_true",
+                        help="Only generate and store embeddings, don't register models")
+    parser.add_argument("--list-providers", action="store_true",
+                        help="List available provider configuration files")
+
+    args = parser.parse_args()
+
+    # Check dependencies
+    if not SUPABASE_AVAILABLE:
+        logger.error("❌ Required dependencies not available. Please install supabase-py and other dependencies.")
+        sys.exit(1)
+
+    # Create the registration script instance
+    try:
+        script = EnhancedModelRegistrationScript()
+    except Exception as e:
+        logger.error(f"❌ Failed to initialize script: {e}")
+        sys.exit(1)
+
+    # Handle --list-providers
+    if args.list_providers:
+        providers = script.list_available_providers()
+        logger.info("Available provider configurations:")
+        for provider in providers:
+            logger.info(f"  - {provider}")
+        return
+
+    # Validate the arguments
+    if not args.all and not args.provider:
+        parser.error("Must specify either --all or --provider")
+
+    if args.all and args.provider:
+        parser.error("Cannot specify both --all and --provider")
+
+    # Run the processing
+    try:
+        if args.dry_run:
+            logger.info("🔍 Running in DRY-RUN mode - no actual processing will occur")
+
+        if args.embeddings_only:
+            logger.info("🎯 Running in EMBEDDINGS-ONLY mode - only generating vector embeddings")
+
+        if args.all:
+            success = await script.register_all_providers(args.dry_run, args.update, args.embeddings_only)
+        else:
+            success = await script.register_provider_models(args.provider, args.dry_run, args.update, args.embeddings_only)
+
+        if success:
+            logger.info("✅ Script completed successfully")
+            sys.exit(0)
+        else:
+            logger.error("❌ Script completed with errors")
+            sys.exit(1)
+
+    except KeyboardInterrupt:
+        logger.info("\n⏹️ Processing cancelled by user")
+        sys.exit(1)
+    except Exception as e:
+        logger.error(f"❌ Unexpected error: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
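For orientation, the validators in the script above imply the schema that each config/models/{provider}_models.yaml file must follow: a top-level provider key plus a models list whose entries carry model_id, model_type, capabilities, and metadata (with description, performance_tier, and provider_model_name). The sketch below is a hypothetical example of such a file, expressed in Python and dumped to YAML; the concrete model values are illustrative only and are not taken from the package.

# Hypothetical provider config, derived only from the fields checked by
# validate_provider_config() / validate_model_config() above.
import yaml

example_config = {
    "provider": "openai",
    "models": [
        {
            "model_id": "gpt-4o-mini",                    # illustrative id
            "model_type": "text",                         # must be in supported_model_types
            "capabilities": ["chat", "text_generation"],  # must be a non-empty list
            "metadata": {
                "description": "Small general-purpose chat model.",
                "performance_tier": "standard",
                "provider_model_name": "gpt-4o-mini",
                "specialized_tasks": ["chat"],            # optional; folded into the embedding search text
            },
        }
    ],
}

# load_provider_config() looks for config/models/openai_models.yaml under the project root.
with open("openai_models.yaml", "w", encoding="utf-8") as f:
    yaml.safe_dump(example_config, f, sort_keys=False)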
isa_model/serving/api/fastapi_server.py
@@ -11,7 +11,7 @@ import time
 import logging
 from typing import Dict, Any
 
-from .routes import ui_analysis, vision, llm, health
+from .routes import ui_analysis, vision, llm, health, unified
 from .middleware.request_logger import RequestLoggerMiddleware
 
 logger = logging.getLogger(__name__)
@@ -60,6 +60,11 @@ def create_app(config: Dict[str, Any] = None) -> FastAPI:
 
     # Include routers
     app.include_router(health.router, prefix="/health", tags=["health"])
+
+    # MAIN UNIFIED API - Single endpoint for all AI services
+    app.include_router(unified.router, prefix="/api/v1", tags=["unified-api"])
+
+    # Legacy specific endpoints (kept for backward compatibility)
     app.include_router(ui_analysis.router, prefix="/ui-analysis", tags=["ui-analysis"])
     app.include_router(vision.router, prefix="/vision", tags=["vision"])
     app.include_router(llm.router, prefix="/llm", tags=["llm"])