isa-model 0.3.9__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isa_model/__init__.py +1 -1
- isa_model/client.py +732 -565
- isa_model/core/cache/redis_cache.py +401 -0
- isa_model/core/config/config_manager.py +53 -10
- isa_model/core/config.py +1 -1
- isa_model/core/database/__init__.py +1 -0
- isa_model/core/database/migrations.py +277 -0
- isa_model/core/database/supabase_client.py +123 -0
- isa_model/core/models/__init__.py +37 -0
- isa_model/core/models/model_billing_tracker.py +60 -88
- isa_model/core/models/model_manager.py +36 -18
- isa_model/core/models/model_repo.py +44 -38
- isa_model/core/models/model_statistics_tracker.py +234 -0
- isa_model/core/models/model_storage.py +0 -1
- isa_model/core/models/model_version_manager.py +959 -0
- isa_model/core/pricing_manager.py +2 -249
- isa_model/core/resilience/circuit_breaker.py +366 -0
- isa_model/core/security/secrets.py +358 -0
- isa_model/core/services/__init__.py +2 -4
- isa_model/core/services/intelligent_model_selector.py +101 -370
- isa_model/core/storage/hf_storage.py +1 -1
- isa_model/core/types.py +7 -0
- isa_model/deployment/cloud/modal/isa_audio_chatTTS_service.py +520 -0
- isa_model/deployment/cloud/modal/isa_audio_fish_service.py +0 -0
- isa_model/deployment/cloud/modal/isa_audio_openvoice_service.py +758 -0
- isa_model/deployment/cloud/modal/isa_audio_service_v2.py +1044 -0
- isa_model/deployment/cloud/modal/isa_embed_rerank_service.py +296 -0
- isa_model/deployment/cloud/modal/isa_video_hunyuan_service.py +423 -0
- isa_model/deployment/cloud/modal/isa_vision_ocr_service.py +519 -0
- isa_model/deployment/cloud/modal/isa_vision_qwen25_service.py +709 -0
- isa_model/deployment/cloud/modal/isa_vision_table_service.py +467 -323
- isa_model/deployment/cloud/modal/isa_vision_ui_service.py +607 -180
- isa_model/deployment/cloud/modal/isa_vision_ui_service_optimized.py +660 -0
- isa_model/deployment/core/deployment_manager.py +6 -4
- isa_model/deployment/services/auto_hf_modal_deployer.py +894 -0
- isa_model/eval/benchmarks/__init__.py +27 -0
- isa_model/eval/benchmarks/multimodal_datasets.py +460 -0
- isa_model/eval/benchmarks.py +244 -12
- isa_model/eval/evaluators/__init__.py +8 -2
- isa_model/eval/evaluators/audio_evaluator.py +727 -0
- isa_model/eval/evaluators/embedding_evaluator.py +742 -0
- isa_model/eval/evaluators/vision_evaluator.py +564 -0
- isa_model/eval/example_evaluation.py +395 -0
- isa_model/eval/factory.py +272 -5
- isa_model/eval/isa_benchmarks.py +700 -0
- isa_model/eval/isa_integration.py +582 -0
- isa_model/eval/metrics.py +159 -6
- isa_model/eval/tests/unit/test_basic.py +396 -0
- isa_model/inference/ai_factory.py +44 -8
- isa_model/inference/services/audio/__init__.py +21 -0
- isa_model/inference/services/audio/base_realtime_service.py +225 -0
- isa_model/inference/services/audio/isa_tts_service.py +0 -0
- isa_model/inference/services/audio/openai_realtime_service.py +320 -124
- isa_model/inference/services/audio/openai_stt_service.py +32 -6
- isa_model/inference/services/base_service.py +17 -1
- isa_model/inference/services/embedding/__init__.py +13 -0
- isa_model/inference/services/embedding/base_embed_service.py +111 -8
- isa_model/inference/services/embedding/isa_embed_service.py +305 -0
- isa_model/inference/services/embedding/openai_embed_service.py +2 -4
- isa_model/inference/services/embedding/tests/test_embedding.py +222 -0
- isa_model/inference/services/img/__init__.py +2 -2
- isa_model/inference/services/img/base_image_gen_service.py +24 -7
- isa_model/inference/services/img/replicate_image_gen_service.py +84 -422
- isa_model/inference/services/img/services/replicate_face_swap.py +193 -0
- isa_model/inference/services/img/services/replicate_flux.py +226 -0
- isa_model/inference/services/img/services/replicate_flux_kontext.py +219 -0
- isa_model/inference/services/img/services/replicate_sticker_maker.py +249 -0
- isa_model/inference/services/img/tests/test_img_client.py +297 -0
- isa_model/inference/services/llm/base_llm_service.py +30 -6
- isa_model/inference/services/llm/helpers/llm_adapter.py +63 -9
- isa_model/inference/services/llm/ollama_llm_service.py +2 -1
- isa_model/inference/services/llm/openai_llm_service.py +652 -55
- isa_model/inference/services/llm/yyds_llm_service.py +2 -1
- isa_model/inference/services/vision/__init__.py +5 -5
- isa_model/inference/services/vision/base_vision_service.py +118 -185
- isa_model/inference/services/vision/helpers/image_utils.py +11 -5
- isa_model/inference/services/vision/isa_vision_service.py +573 -0
- isa_model/inference/services/vision/tests/test_ocr_client.py +284 -0
- isa_model/serving/api/fastapi_server.py +88 -16
- isa_model/serving/api/middleware/auth.py +311 -0
- isa_model/serving/api/middleware/security.py +278 -0
- isa_model/serving/api/routes/analytics.py +486 -0
- isa_model/serving/api/routes/deployments.py +339 -0
- isa_model/serving/api/routes/evaluations.py +579 -0
- isa_model/serving/api/routes/logs.py +430 -0
- isa_model/serving/api/routes/settings.py +582 -0
- isa_model/serving/api/routes/unified.py +324 -165
- isa_model/serving/api/startup.py +304 -0
- isa_model/serving/modal_proxy_server.py +249 -0
- isa_model/training/__init__.py +100 -6
- isa_model/training/core/__init__.py +4 -1
- isa_model/training/examples/intelligent_training_example.py +281 -0
- isa_model/training/intelligent/__init__.py +25 -0
- isa_model/training/intelligent/decision_engine.py +643 -0
- isa_model/training/intelligent/intelligent_factory.py +888 -0
- isa_model/training/intelligent/knowledge_base.py +751 -0
- isa_model/training/intelligent/resource_optimizer.py +839 -0
- isa_model/training/intelligent/task_classifier.py +576 -0
- isa_model/training/storage/__init__.py +24 -0
- isa_model/training/storage/core_integration.py +439 -0
- isa_model/training/storage/training_repository.py +552 -0
- isa_model/training/storage/training_storage.py +628 -0
- {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/METADATA +13 -1
- isa_model-0.4.0.dist-info/RECORD +182 -0
- isa_model/deployment/cloud/modal/isa_vision_doc_service.py +0 -766
- isa_model/deployment/cloud/modal/register_models.py +0 -321
- isa_model/inference/adapter/unified_api.py +0 -248
- isa_model/inference/services/helpers/stacked_config.py +0 -148
- isa_model/inference/services/img/flux_professional_service.py +0 -603
- isa_model/inference/services/img/helpers/base_stacked_service.py +0 -274
- isa_model/inference/services/others/table_transformer_service.py +0 -61
- isa_model/inference/services/vision/doc_analysis_service.py +0 -640
- isa_model/inference/services/vision/helpers/base_stacked_service.py +0 -274
- isa_model/inference/services/vision/ui_analysis_service.py +0 -823
- isa_model/scripts/inference_tracker.py +0 -283
- isa_model/scripts/mlflow_manager.py +0 -379
- isa_model/scripts/model_registry.py +0 -465
- isa_model/scripts/register_models.py +0 -370
- isa_model/scripts/register_models_with_embeddings.py +0 -510
- isa_model/scripts/start_mlflow.py +0 -95
- isa_model/scripts/training_tracker.py +0 -257
- isa_model-0.3.9.dist-info/RECORD +0 -138
- {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/WHEEL +0 -0
- {isa_model-0.3.9.dist-info → isa_model-0.4.0.dist-info}/top_level.txt +0 -0
isa_model/inference/ai_factory.py

```diff
@@ -110,6 +110,7 @@ class AIFactory:
 
         Args:
             model_name: Model name. Special names:
+                - "hybrid": Unified UI/Document analysis service (RECOMMENDED)
                 - "isa_vision_table": Table extraction service
                 - "isa_vision_ui": UI detection service
                 - "isa_vision_doc": Document analysis service
```
```diff
@@ -120,18 +121,27 @@ class AIFactory:
         Returns:
             Vision service instance
         """
-        # Handle special
-        if model_name
+        # Handle special vision services
+        if model_name == "hybrid":
+            # Hybrid vision service has been deprecated, use OpenAI as fallback
+            logger.warning("HybridVisionService is deprecated, using OpenAI vision service as fallback")
+            final_provider = "openai"
+            final_model_name = "gpt-4.1-nano"
+
+        elif model_name in ["isa_vision_table", "isa_vision_ui", "isa_vision_doc"]:
             try:
                 from isa_model.deployment.services.simple_auto_deploy_vision_service import SimpleAutoDeployVisionService
                 logger.info(f"Creating auto-deploy service wrapper for {model_name}")
                 return SimpleAutoDeployVisionService(model_name, config)
             except Exception as e:
                 logger.error(f"Failed to create ISA vision service: {e}")
-
+                # Fallback to ISA service
+                logger.warning(f"Auto-deploy service failed, using ISA vision service as fallback")
+                final_provider = "isa"
+                final_model_name = "isa-omniparser-ui-detection"
 
         # Set defaults for regular services
-
+        elif provider == "openai":
             final_model_name = model_name or "gpt-4.1-mini"
             final_provider = provider
         elif provider == "ollama":
```
```diff
@@ -140,6 +150,9 @@ class AIFactory:
         elif provider == "replicate":
             final_model_name = model_name or "meta/llama-2-70b-chat"
             final_provider = provider
+        elif provider == "isa":
+            final_model_name = model_name or "isa-omniparser-ui-detection"
+            final_provider = provider
         else:
             # Default provider selection
             final_provider = provider or "openai"
```
```diff
@@ -147,6 +160,8 @@ class AIFactory:
                 final_model_name = model_name or "gpt-4.1-mini"
             elif final_provider == "ollama":
                 final_model_name = model_name or "llama3.2-vision:latest"
+            elif final_provider == "isa":
+                final_model_name = model_name or "isa-omniparser-ui-detection"
             else:
                 final_model_name = model_name or "gpt-4.1-mini"
 
```
```diff
@@ -160,6 +175,10 @@ class AIFactory:
                 from isa_model.inference.services.vision.replicate_vision_service import ReplicateVisionService
                 return ReplicateVisionService(provider_name=final_provider, model_name=final_model_name,
                                               model_manager=self.model_manager, config_manager=self.config_manager)
+            elif final_provider == "isa":
+                from isa_model.inference.services.vision.isa_vision_service import ISAVisionService
+                logger.info(f"Creating ISA Vision Service with model: {final_model_name}")
+                return ISAVisionService()
             else:
                 raise ValueError(f"Unsupported vision provider: {final_provider}")
         except Exception as e:
```
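The hunks above rewire how the vision factory picks a provider and default model: the deprecated "hybrid" alias now falls back to OpenAI, and the new "isa" provider resolves to the OmniParser UI-detection model. As a rough standalone illustration of that resolution logic (not the package's API; every name below is hypothetical):

```python
# Standalone sketch of the routing pattern above: a deprecated alias and each
# provider resolve to concrete (provider, model) defaults before dispatch.
# All names here are illustrative, not imports from isa_model.
import logging
from typing import Optional, Tuple

logger = logging.getLogger(__name__)

DEFAULTS = {
    "openai": "gpt-4.1-mini",
    "ollama": "llama3.2-vision:latest",
    "replicate": "meta/llama-2-70b-chat",
    "isa": "isa-omniparser-ui-detection",
}


def resolve_vision_target(model_name: Optional[str], provider: Optional[str]) -> Tuple[str, str]:
    """Map a requested (model, provider) pair to the concrete pair that gets instantiated."""
    if model_name == "hybrid":
        # Deprecated alias: fall back to the OpenAI default, mirroring the warning in the diff.
        logger.warning("'hybrid' is deprecated; falling back to openai/gpt-4.1-nano")
        return "openai", "gpt-4.1-nano"
    final_provider = provider or "openai"
    return final_provider, model_name or DEFAULTS.get(final_provider, "gpt-4.1-mini")


print(resolve_vision_target(None, "isa"))      # ('isa', 'isa-omniparser-ui-detection')
print(resolve_vision_target("hybrid", None))   # ('openai', 'gpt-4.1-nano')
```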
```diff
@@ -322,17 +341,34 @@ class AIFactory:
             final_model_name = model_name or "bge-m3"
 
         # Create service using new centralized approach
+        # Create cache key
+        cache_key = f"embed_{final_provider}_{final_model_name}"
+
+        # Check cache first
+        if cache_key in self._cached_services:
+            logger.debug(f"Using cached embedding service: {cache_key}")
+            return self._cached_services[cache_key]
+
         try:
             if final_provider == "openai":
                 from isa_model.inference.services.embedding.openai_embed_service import OpenAIEmbedService
-
-
+                service = OpenAIEmbedService(provider_name=final_provider, model_name=final_model_name,
+                                             model_manager=self.model_manager, config_manager=self.config_manager)
             elif final_provider == "ollama":
                 from isa_model.inference.services.embedding.ollama_embed_service import OllamaEmbedService
-
-
+                service = OllamaEmbedService(provider_name=final_provider, model_name=final_model_name,
+                                             model_manager=self.model_manager, config_manager=self.config_manager)
+            elif final_provider == "isa":
+                from isa_model.inference.services.embedding.isa_embed_service import ISAEmbedService
+                service = ISAEmbedService()  # ISA service doesn't use model_manager/config_manager yet
             else:
                 raise ValueError(f"Unsupported embedding provider: {final_provider}")
+
+            # Cache the service
+            self._cached_services[cache_key] = service
+            logger.debug(f"Created and cached embedding service: {cache_key}")
+            return service
+
         except Exception as e:
             logger.error(f"Failed to create embedding service: {e}")
             raise
```
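This hunk also adds per-provider/model caching of embedding services, keyed by `embed_{provider}_{model}`. A minimal self-contained sketch of the same memoization pattern, using placeholder classes rather than isa_model internals:

```python
# Sketch of the service-caching pattern added above: one cache entry per
# "embed_{provider}_{model}" key, created lazily on first request.
# EmbedService and ServiceFactory are placeholders, not isa_model classes.
from typing import Dict


class EmbedService:
    def __init__(self, provider: str, model: str) -> None:
        self.provider, self.model = provider, model


class ServiceFactory:
    def __init__(self) -> None:
        self._cached_services: Dict[str, EmbedService] = {}

    def get_embed(self, provider: str, model: str) -> EmbedService:
        cache_key = f"embed_{provider}_{model}"
        if cache_key in self._cached_services:
            return self._cached_services[cache_key]  # reuse the existing instance
        service = EmbedService(provider, model)      # construct once on a cache miss
        self._cached_services[cache_key] = service
        return service


factory = ServiceFactory()
assert factory.get_embed("openai", "bge-m3") is factory.get_embed("openai", "bge-m3")
```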
isa_model/inference/services/audio/__init__.py (new file)

```diff
@@ -0,0 +1,21 @@
+"""
+Audio Services - Speech, TTS, and Audio Processing Services
+"""
+
+from .base_stt_service import BaseSTTService
+from .base_tts_service import BaseTTSService
+from .base_realtime_service import BaseRealtimeService
+from .openai_stt_service import OpenAISTTService
+from .openai_tts_service import OpenAITTSService
+from .openai_realtime_service import OpenAIRealtimeService
+from .replicate_tts_service import ReplicateTTSService
+
+__all__ = [
+    'BaseSTTService',
+    'BaseeTTSService',
+    'BaseRealtimeService',
+    'OpenAISTTService',
+    'OpenAITTSService',
+    'OpenAIRealtimeService',
+    'ReplicateTTSService'
+]
```
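With the 0.4.0 wheel installed (plus whatever provider SDKs the individual audio services pull in), the classes re-exported by this new `__init__` can be imported by name, for example:

```python
# Named imports of classes re-exported by the new audio __init__ above.
# Assumes isa-model 0.4.0 and the relevant provider dependencies are installed.
from isa_model.inference.services.audio import (
    BaseRealtimeService,
    OpenAIRealtimeService,
    OpenAISTTService,
)
```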
isa_model/inference/services/audio/base_realtime_service.py (new file)

```diff
@@ -0,0 +1,225 @@
+from abc import ABC, abstractmethod
+from typing import Dict, Any, List, Union, Optional, Callable, AsyncGenerator
+from enum import Enum
+import asyncio
+from isa_model.inference.services.base_service import BaseService
+
+
+class RealtimeEventType(Enum):
+    """Realtime API event types"""
+    # Session events
+    SESSION_CREATED = "session.created"
+    SESSION_UPDATED = "session.updated"
+
+    # Input audio events
+    INPUT_AUDIO_BUFFER_APPEND = "input_audio_buffer.append"
+    INPUT_AUDIO_BUFFER_COMMIT = "input_audio_buffer.commit"
+    INPUT_AUDIO_BUFFER_CLEAR = "input_audio_buffer.clear"
+    INPUT_AUDIO_BUFFER_COMMITTED = "input_audio_buffer.committed"
+    INPUT_AUDIO_BUFFER_SPEECH_STARTED = "input_audio_buffer.speech_started"
+    INPUT_AUDIO_BUFFER_SPEECH_STOPPED = "input_audio_buffer.speech_stopped"
+
+    # Conversation events
+    CONVERSATION_ITEM_CREATE = "conversation.item.create"
+    CONVERSATION_ITEM_CREATED = "conversation.item.created"
+    CONVERSATION_ITEM_DELETE = "conversation.item.delete"
+    CONVERSATION_ITEM_DELETED = "conversation.item.deleted"
+    CONVERSATION_ITEM_TRUNCATE = "conversation.item.truncate"
+    CONVERSATION_ITEM_TRUNCATED = "conversation.item.truncated"
+
+    # Response events
+    RESPONSE_CREATE = "response.create"
+    RESPONSE_CREATED = "response.created"
+    RESPONSE_DONE = "response.done"
+    RESPONSE_OUTPUT_ITEM_ADDED = "response.output_item.added"
+    RESPONSE_OUTPUT_ITEM_DONE = "response.output_item.done"
+    RESPONSE_CONTENT_PART_ADDED = "response.content_part.added"
+    RESPONSE_CONTENT_PART_DONE = "response.content_part.done"
+    RESPONSE_TEXT_DELTA = "response.text.delta"
+    RESPONSE_TEXT_DONE = "response.text.done"
+    RESPONSE_AUDIO_TRANSCRIPT_DELTA = "response.audio_transcript.delta"
+    RESPONSE_AUDIO_TRANSCRIPT_DONE = "response.audio_transcript.done"
+    RESPONSE_AUDIO_DELTA = "response.audio.delta"
+    RESPONSE_AUDIO_DONE = "response.audio.done"
+    RESPONSE_FUNCTION_CALL_ARGUMENTS_DELTA = "response.function_call_arguments.delta"
+    RESPONSE_FUNCTION_CALL_ARGUMENTS_DONE = "response.function_call_arguments.done"
+
+    # Rate limit events
+    RATE_LIMITS_UPDATED = "rate_limits.updated"
+
+    # Error events
+    ERROR = "error"
+
+
+class BaseRealtimeService(BaseService):
+    """Base class for Realtime API services"""
+
+    def __init__(self, provider_name: str, model_name: str, **kwargs):
+        super().__init__(provider_name, model_name, **kwargs)
+        self.session_id: Optional[str] = None
+        self.websocket = None
+        self.event_handlers: Dict[str, List[Callable]] = {}
+        self.is_connected = False
+
+    async def invoke(
+        self,
+        task: str,
+        **kwargs
+    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
+        """
+        Unified task dispatch method - supports realtime conversation tasks
+
+        Args:
+            task: Task type; multiple realtime conversation tasks are supported
+            **kwargs: Additional task-specific parameters
+
+        Returns:
+            Dict containing task results
+        """
+        if task == "create_session":
+            return await self.create_session(**kwargs)
+        elif task == "connect":
+            return await self.connect_websocket(**kwargs)
+        elif task == "send_audio":
+            if not kwargs.get("audio_data"):
+                raise ValueError("audio_data is required for send_audio task")
+            return await self.send_audio_message(kwargs["audio_data"], **kwargs)
+        elif task == "send_text":
+            if not kwargs.get("text"):
+                raise ValueError("text is required for send_text task")
+            return await self.send_text_message(kwargs["text"], **kwargs)
+        elif task == "listen":
+            return await self.listen_for_responses(**kwargs)
+        elif task == "audio_chat":
+            return await self.simple_audio_chat(**kwargs)
+        elif task == "text_chat":
+            return await self.simple_text_chat(**kwargs)
+        else:
+            raise NotImplementedError(f"{self.__class__.__name__} does not support task: {task}")
+
+    def get_supported_tasks(self) -> List[str]:
+        """Get the list of supported tasks"""
+        return [
+            "create_session", "connect", "send_audio", "send_text",
+            "listen", "audio_chat", "text_chat"
+        ]
+
+    @abstractmethod
+    async def create_session(
+        self,
+        instructions: str = "You are a helpful assistant.",
+        modalities: Optional[List[str]] = None,
+        voice: str = "alloy",
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Create a new realtime session"""
+        pass
+
+    @abstractmethod
+    async def connect_websocket(self, **kwargs) -> bool:
+        """Connect to the realtime WebSocket"""
+        pass
+
+    @abstractmethod
+    async def send_audio_message(
+        self,
+        audio_data: bytes,
+        format: str = "pcm16",
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Send audio data to the realtime session"""
+        pass
+
+    @abstractmethod
+    async def send_text_message(
+        self,
+        text: str,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Send text message to the realtime session"""
+        pass
+
+    @abstractmethod
+    async def listen_for_responses(
+        self,
+        message_handler: Optional[Callable] = None,
+        **kwargs
+    ) -> AsyncGenerator[Dict[str, Any], None]:
+        """Listen for responses from the realtime session"""
+        pass
+
+    @abstractmethod
+    async def simple_audio_chat(
+        self,
+        audio_data: bytes,
+        instructions: str = "You are a helpful assistant. Respond in audio.",
+        voice: str = "alloy",
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Simple audio chat - send audio, get audio response"""
+        pass
+
+    @abstractmethod
+    async def simple_text_chat(
+        self,
+        text: str,
+        instructions: str = "You are a helpful assistant.",
+        voice: str = "alloy",
+        **kwargs
+    ) -> Dict[str, Any]:
+        """Simple text chat - send text, get audio/text response"""
+        pass
+
+    def add_event_handler(self, event_type: Union[str, RealtimeEventType], handler: Callable):
+        """Add event handler for specific event type"""
+        event_name = event_type.value if isinstance(event_type, RealtimeEventType) else event_type
+        if event_name not in self.event_handlers:
+            self.event_handlers[event_name] = []
+        self.event_handlers[event_name].append(handler)
+
+    def remove_event_handler(self, event_type: Union[str, RealtimeEventType], handler: Callable):
+        """Remove event handler"""
+        event_name = event_type.value if isinstance(event_type, RealtimeEventType) else event_type
+        if event_name in self.event_handlers:
+            self.event_handlers[event_name].remove(handler)
+
+    async def _handle_event(self, event: Dict[str, Any]):
+        """Handle incoming events"""
+        event_type = event.get("type")
+        if event_type in self.event_handlers:
+            for handler in self.event_handlers[event_type]:
+                try:
+                    await handler(event)
+                except Exception as e:
+                    import logging
+                    logging.getLogger(__name__).error(f"Error in event handler for {event_type}: {e}")
+
+    @abstractmethod
+    def get_supported_voices(self) -> List[str]:
+        """Get list of supported voice options"""
+        pass
+
+    @abstractmethod
+    def get_supported_formats(self) -> List[str]:
+        """Get list of supported audio formats"""
+        pass
+
+    @abstractmethod
+    def get_session_limits(self) -> Dict[str, Any]:
+        """Get session limits and constraints"""
+        pass
+
+    @abstractmethod
+    async def update_session(self, **kwargs) -> Dict[str, Any]:
+        """Update session configuration"""
+        pass
+
+    @abstractmethod
+    async def disconnect(self):
+        """Disconnect from the realtime session"""
+        pass
+
+    @abstractmethod
+    async def close(self):
+        """Cleanup resources"""
+        pass
```
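BaseRealtimeService keeps a per-event-type registry (`add_event_handler`) and fans each incoming event out to the registered coroutines (`_handle_event`). A self-contained sketch of that dispatch pattern, using illustrative names rather than subclassing the package's BaseService:

```python
# Minimal standalone sketch of the handler-registration/dispatch pattern used by
# BaseRealtimeService above (names here are illustrative, not the package's API).
import asyncio
from typing import Any, Callable, Dict, List


class EventBus:
    """Register async handlers per event type and dispatch incoming events to them."""

    def __init__(self) -> None:
        self.event_handlers: Dict[str, List[Callable]] = {}

    def add_event_handler(self, event_type: str, handler: Callable) -> None:
        self.event_handlers.setdefault(event_type, []).append(handler)

    async def handle_event(self, event: Dict[str, Any]) -> None:
        # Call every handler registered for this event's "type" field.
        for handler in self.event_handlers.get(event.get("type", ""), []):
            await handler(event)


async def main() -> None:
    bus = EventBus()

    async def on_text_delta(event: Dict[str, Any]) -> None:
        print("delta:", event.get("delta"))

    bus.add_event_handler("response.text.delta", on_text_delta)
    await bus.handle_event({"type": "response.text.delta", "delta": "Hello"})


asyncio.run(main())
```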