aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.
Potentially problematic release. This version of aiecs might be problematic.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +399 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3870 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1435 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +884 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +364 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +224 -36
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +324 -0
- aiecs/llm/clients/google_function_calling_mixin.py +457 -0
- aiecs/llm/clients/googleai_client.py +241 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +897 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1323 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1011 -0
- aiecs/tools/docs/document_writer_tool.py +1829 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +175 -131
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
- aiecs-1.7.6.dist-info/RECORD +337 -0
- aiecs-1.7.6.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/aiecs/llm/clients/openai_client.py
@@ -0,0 +1,158 @@
import logging
from typing import Optional, List, Dict, AsyncGenerator, cast, Any
from openai import AsyncOpenAI
from tenacity import (
    retry,
    stop_after_attempt,
    wait_exponential,
    retry_if_exception_type,
)
import httpx

from aiecs.llm.clients.base_client import (
    BaseLLMClient,
    LLMMessage,
    LLMResponse,
    ProviderNotAvailableError,
    RateLimitError,
)
from aiecs.llm.clients.openai_compatible_mixin import (
    OpenAICompatibleFunctionCallingMixin,
    StreamChunk,
)
from aiecs.config.config import get_settings

logger = logging.getLogger(__name__)


class OpenAIClient(BaseLLMClient, OpenAICompatibleFunctionCallingMixin):
    """OpenAI provider client"""

    def __init__(self) -> None:
        super().__init__("OpenAI")
        self.settings = get_settings()
        self._client: Optional[AsyncOpenAI] = None

    def _get_client(self) -> AsyncOpenAI:
        """Lazy initialization of OpenAI client"""
        if not self._client:
            if not self.settings.openai_api_key:
                raise ProviderNotAvailableError("OpenAI API key not configured")
            self._client = AsyncOpenAI(api_key=self.settings.openai_api_key)
        return self._client

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((httpx.RequestError, RateLimitError)),
    )
    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        **kwargs,
    ) -> LLMResponse:
        """
        Generate text using OpenAI API with optional function calling support.

        Args:
            messages: List of LLM messages
            model: Model name (optional)
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format, recommended)
            tool_choice: Tool choice strategy ("auto", "none", or specific tool)
            **kwargs: Additional arguments passed to OpenAI API

        Returns:
            LLMResponse with content and optional function_call information
        """
        client = self._get_client()

        # Get model name from config if not provided
        model = model or self._get_default_model() or "gpt-4-turbo"

        try:
            # Use mixin method for Function Calling support
            return await self._generate_text_with_function_calling(
                client=client,
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                **kwargs,
            )

        except Exception as e:
            if "rate_limit" in str(e).lower():
                raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}")
            raise

    async def stream_text(  # type: ignore[override]
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        return_chunks: bool = False,
        **kwargs,
    ) -> AsyncGenerator[Any, None]:
        """
        Stream text using OpenAI API with optional function calling support.

        Args:
            messages: List of LLM messages
            model: Model name (optional)
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format, recommended)
            tool_choice: Tool choice strategy ("auto", "none", or specific tool)
            return_chunks: If True, returns StreamChunk objects with tool_calls info; if False, returns str tokens only
            **kwargs: Additional arguments passed to OpenAI API

        Yields:
            str or StreamChunk: Text tokens as they are generated, or StreamChunk objects if return_chunks=True
        """
        client = self._get_client()

        # Get model name from config if not provided
        model = model or self._get_default_model() or "gpt-4-turbo"

        try:
            # Use mixin method for Function Calling support
            async for chunk in self._stream_text_with_function_calling(
                client=client,
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                return_chunks=return_chunks,
                **kwargs,
            ):
                yield chunk
        except Exception as e:
            if "rate_limit" in str(e).lower():
                raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}")
            raise

    async def close(self):
        """Clean up resources"""
        if self._client:
            await self._client.close()
            self._client = None
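
For orientation, a minimal usage sketch of the new OpenAIClient follows. It assumes an OpenAI API key is available to the settings object returned by get_settings(), that LLMMessage accepts role and content as keyword arguments (the mixin reads exactly those attributes), and a hypothetical get_weather tool schema; none of these details are guaranteed by the diff itself.

import asyncio

from aiecs.llm.clients.base_client import LLMMessage
from aiecs.llm.clients.openai_client import OpenAIClient


async def main() -> None:
    client = OpenAIClient()
    # Standard OpenAI "tools" schema; get_weather is illustrative only.
    tools = [{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }]
    messages = [LLMMessage(role="user", content="What's the weather in Oslo?")]
    response = await client.generate_text(messages, tools=tools, tool_choice="auto")
    # The mixin attaches tool_calls via setattr, so it may be absent; read defensively.
    print(getattr(response, "tool_calls", None) or response.content)
    await client.close()


asyncio.run(main())

Note that function_call and tool_calls are attached to the LLMResponse dynamically (via setattr in the mixin below), which is why the sketch reads them with getattr rather than as declared fields.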
--- /dev/null
+++ b/aiecs/llm/clients/openai_compatible_mixin.py
@@ -0,0 +1,367 @@
"""
OpenAI-Compatible Function Calling Mixin

Provides shared implementation for OpenAI-compatible providers (OpenAI, xAI, etc.)
that use the same API format for Function Calling.
"""

import logging
from typing import Dict, Any, Optional, List, AsyncGenerator, cast, Union
from dataclasses import dataclass
from openai import AsyncOpenAI

from .base_client import LLMMessage, LLMResponse

logger = logging.getLogger(__name__)


@dataclass
class StreamChunk:
    """
    Represents a chunk in streaming response.

    Can contain either a text token or tool call information.
    """
    type: str  # "token" or "tool_call"
    content: Optional[str] = None  # Text token content
    tool_call: Optional[Dict[str, Any]] = None  # Tool call information
    tool_calls: Optional[List[Dict[str, Any]]] = None  # Complete tool calls (when stream ends)


class OpenAICompatibleFunctionCallingMixin:
    """
    Mixin class providing OpenAI-compatible Function Calling implementation.

    This mixin can be used by any provider that uses OpenAI-compatible API format.
    Examples: OpenAI, xAI (Grok), and other OpenAI-compatible providers.

    Usage:
        class MyClient(BaseLLMClient, OpenAICompatibleFunctionCallingMixin):
            def _get_openai_client(self) -> AsyncOpenAI:
                # Return OpenAI-compatible client
                pass

            async def generate_text(self, messages, **kwargs):
                return await self._generate_text_with_function_calling(
                    messages, **kwargs
                )
    """

    def _convert_messages_to_openai_format(self, messages: List[LLMMessage]) -> List[Dict[str, Any]]:
        """
        Convert LLMMessage list to OpenAI message format (support tool calls).

        Args:
            messages: List of LLMMessage objects

        Returns:
            List of OpenAI-format message dictionaries
        """
        openai_messages = []
        for msg in messages:
            msg_dict: Dict[str, Any] = {"role": msg.role}
            if msg.content is not None:
                msg_dict["content"] = msg.content
            if msg.tool_calls:
                msg_dict["tool_calls"] = msg.tool_calls
            if msg.tool_call_id:
                msg_dict["tool_call_id"] = msg.tool_call_id
            openai_messages.append(msg_dict)
        return openai_messages

    def _prepare_function_calling_params(
        self,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
    ) -> Dict[str, Any]:
        """
        Prepare function calling parameters for OpenAI-compatible API.

        Args:
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format, recommended)
            tool_choice: Tool choice strategy ("auto", "none", or specific tool)

        Returns:
            Dictionary with function calling parameters
        """
        params: Dict[str, Any] = {}

        # Prefer 'tools' parameter (new format) over 'functions' (legacy)
        if tools:
            params["tools"] = tools
            if tool_choice is not None:
                params["tool_choice"] = tool_choice
        elif functions:
            # Legacy format - convert to tools format for consistency
            params["tools"] = [{"type": "function", "function": func} for func in functions]
            if tool_choice is not None:
                params["tool_choice"] = tool_choice

        return params

    def _extract_function_calls_from_response(self, message: Any) -> tuple[Optional[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:
        """
        Extract function calls from OpenAI-compatible response message.

        Args:
            message: Response message object from OpenAI SDK

        Returns:
            Tuple of (function_call, tool_calls)
            - function_call: Legacy format function call (if present)
            - tool_calls: New format tool calls (if present)
        """
        function_call = None
        tool_calls = None

        # Check for legacy function_call format
        if hasattr(message, "function_call") and message.function_call:
            function_call = {
                "name": message.function_call.name,
                "arguments": message.function_call.arguments,
            }

        # Check for new tool_calls format
        elif hasattr(message, "tool_calls") and message.tool_calls:
            tool_calls = [
                {
                    "id": tc.id,
                    "type": tc.type,
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                    },
                }
                for tc in message.tool_calls
            ]

        return function_call, tool_calls

    def _attach_function_calls_to_response(
        self,
        response: LLMResponse,
        function_call: Optional[Dict[str, Any]] = None,
        tool_calls: Optional[List[Dict[str, Any]]] = None,
    ) -> LLMResponse:
        """
        Attach function call information to LLMResponse.

        Args:
            response: LLMResponse object
            function_call: Legacy format function call
            tool_calls: New format tool calls

        Returns:
            LLMResponse with function call info attached
        """
        if function_call:
            setattr(response, "function_call", function_call)
        if tool_calls:
            setattr(response, "tool_calls", tool_calls)
        return response

    async def _generate_text_with_function_calling(
        self,
        client: AsyncOpenAI,
        messages: List[LLMMessage],
        model: str,
        temperature: float,
        max_tokens: Optional[int],
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        **kwargs,
    ) -> LLMResponse:
        """
        Generate text with Function Calling support (OpenAI-compatible).

        This is a helper method that can be called by subclasses to implement
        generate_text() with Function Calling support.

        Args:
            client: AsyncOpenAI client instance
            messages: List of LLM messages
            model: Model name
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format)
            tool_choice: Tool choice strategy
            **kwargs: Additional arguments

        Returns:
            LLMResponse with optional function_call information
        """
        # Convert messages to OpenAI format
        openai_messages = self._convert_messages_to_openai_format(messages)

        # Prepare API call parameters
        api_params: Dict[str, Any] = {
            "model": model,
            "messages": cast(Any, openai_messages),  # type: ignore[arg-type]
            "temperature": temperature,
            "max_tokens": max_tokens,
        }

        # Add function calling support
        fc_params = self._prepare_function_calling_params(functions, tools, tool_choice)
        api_params.update(fc_params)

        # Add any additional kwargs
        api_params.update(kwargs)

        # Make API call
        response = await client.chat.completions.create(**api_params)

        # Extract response content
        message = response.choices[0].message
        content = message.content or ""

        # Extract function calls
        function_call, tool_calls = self._extract_function_calls_from_response(message)

        # Extract token usage
        tokens_used = response.usage.total_tokens if response.usage else None
        input_tokens = response.usage.prompt_tokens if response.usage else 0
        output_tokens = response.usage.completion_tokens if response.usage else 0

        # Extract cache metadata from OpenAI response
        # OpenAI returns cached_tokens in prompt_tokens_details for supported models
        cache_read_tokens = None
        cache_hit = None
        if response.usage and hasattr(response.usage, "prompt_tokens_details"):
            details = response.usage.prompt_tokens_details
            if details and hasattr(details, "cached_tokens"):
                cache_read_tokens = details.cached_tokens
                cache_hit = cache_read_tokens is not None and cache_read_tokens > 0

        # Create response
        llm_response = LLMResponse(
            content=content,
            provider=self.provider_name,  # type: ignore[attr-defined]
            model=model,
            tokens_used=tokens_used,
            prompt_tokens=input_tokens if response.usage else None,
            completion_tokens=output_tokens if response.usage else None,
            cost_estimate=self._estimate_cost_from_config(model, input_tokens, output_tokens)  # type: ignore[attr-defined]
            if hasattr(self, "_estimate_cost_from_config") else None,
            cache_read_tokens=cache_read_tokens,
            cache_hit=cache_hit,
        )

        # Attach function call info
        return self._attach_function_calls_to_response(llm_response, function_call, tool_calls)

    async def _stream_text_with_function_calling(
        self,
        client: AsyncOpenAI,
        messages: List[LLMMessage],
        model: str,
        temperature: float,
        max_tokens: Optional[int],
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        return_chunks: bool = False,
        **kwargs,
    ) -> AsyncGenerator[Union[str, StreamChunk], None]:
        """
        Stream text with Function Calling support (OpenAI-compatible).

        This is a helper method that can be called by subclasses to implement
        stream_text() with Function Calling support.

        Args:
            client: AsyncOpenAI client instance
            messages: List of LLM messages
            model: Model name
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format)
            tool_choice: Tool choice strategy
            return_chunks: If True, returns StreamChunk objects; if False, returns str tokens only
            **kwargs: Additional arguments

        Yields:
            str or StreamChunk: Text tokens as they are generated, or StreamChunk objects if return_chunks=True

        Note:
            When return_chunks=True, yields StreamChunk objects that can contain:
            - type="token": Text token content
            - type="tool_call": Tool call information (accumulated)
            - type="tool_calls": Complete tool calls list (at end of stream)
        """
        # Convert messages to OpenAI format
        openai_messages = self._convert_messages_to_openai_format(messages)

        # Prepare API call parameters
        api_params: Dict[str, Any] = {
            "model": model,
            "messages": cast(Any, openai_messages),  # type: ignore[arg-type]
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": True,
        }

        # Add function calling support
        fc_params = self._prepare_function_calling_params(functions, tools, tool_choice)
        api_params.update(fc_params)

        # Add any additional kwargs
        api_params.update(kwargs)

        # Stream response
        stream = await client.chat.completions.create(**api_params)

        # Accumulator for tool calls
        tool_calls_accumulator: Dict[str, Dict[str, Any]] = {}

        if hasattr(stream, "__aiter__"):
            async for chunk in stream:
                delta = chunk.choices[0].delta

                # Yield text tokens
                if delta.content:
                    if return_chunks:
                        yield StreamChunk(type="token", content=delta.content)
                    else:
                        yield delta.content

                # Accumulate tool calls
                if delta.tool_calls:
                    for tool_call_delta in delta.tool_calls:
                        call_id = tool_call_delta.id

                        # Initialize accumulator for this call if needed
                        if call_id not in tool_calls_accumulator:
                            tool_calls_accumulator[call_id] = {
                                "id": call_id,
                                "type": "function",
                                "function": {"name": "", "arguments": ""},
                            }

                        # Accumulate function name and arguments
                        if tool_call_delta.function:
                            if tool_call_delta.function.name:
                                tool_calls_accumulator[call_id]["function"]["name"] = tool_call_delta.function.name
                            if tool_call_delta.function.arguments:
                                tool_calls_accumulator[call_id]["function"]["arguments"] += tool_call_delta.function.arguments

                        # Yield tool call update if return_chunks=True
                        if return_chunks:
                            yield StreamChunk(
                                type="tool_call",
                                tool_call=tool_calls_accumulator[call_id].copy(),
                            )

        # At the end of stream, yield complete tool_calls if any
        if tool_calls_accumulator and return_chunks:
            complete_tool_calls = list(tool_calls_accumulator.values())
            yield StreamChunk(
                type="tool_calls",
                tool_calls=complete_tool_calls,
            )
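
The Usage block in the mixin's docstring shows the intended adoption pattern for other OpenAI-compatible providers; the sketch below fleshes it out slightly. The provider name "Example", the EXAMPLE_API_KEY variable, the base_url, and the default model are hypothetical placeholders, and the super().__init__("Example") call simply mirrors what OpenAIClient does above rather than anything this diff specifies.

import os
from typing import List, Optional

from openai import AsyncOpenAI

from aiecs.llm.clients.base_client import BaseLLMClient, LLMMessage, LLMResponse
from aiecs.llm.clients.openai_compatible_mixin import OpenAICompatibleFunctionCallingMixin


class ExampleClient(BaseLLMClient, OpenAICompatibleFunctionCallingMixin):
    """Hypothetical provider that speaks the OpenAI wire format."""

    def __init__(self) -> None:
        super().__init__("Example")
        self._client: Optional[AsyncOpenAI] = None

    def _get_client(self) -> AsyncOpenAI:
        # Lazy init against an illustrative OpenAI-compatible endpoint.
        if not self._client:
            self._client = AsyncOpenAI(
                api_key=os.environ["EXAMPLE_API_KEY"],
                base_url="https://api.example.com/v1",
            )
        return self._client

    async def generate_text(self, messages: List[LLMMessage], **kwargs) -> LLMResponse:
        # Delegate to the shared mixin implementation; tools, functions,
        # and tool_choice pass through **kwargs unchanged.
        return await self._generate_text_with_function_calling(
            client=self._get_client(),
            messages=messages,
            model=kwargs.pop("model", None) or "example-model",
            temperature=kwargs.pop("temperature", 0.7),
            max_tokens=kwargs.pop("max_tokens", None),
            **kwargs,
        )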
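
The mixin also pins down a streaming contract: with return_chunks=True, callers receive StreamChunk objects whose type field is "token" (text), "tool_call" (a partially accumulated call), or "tool_calls" (the complete list once the stream ends). A consumer sketch, under the same LLMMessage construction assumption as the earlier example:

import asyncio

from aiecs.llm.clients.base_client import LLMMessage
from aiecs.llm.clients.openai_client import OpenAIClient
from aiecs.llm.clients.openai_compatible_mixin import StreamChunk


async def main() -> None:
    client = OpenAIClient()
    messages = [LLMMessage(role="user", content="Summarize the 1.7.6 changes.")]
    async for chunk in client.stream_text(messages, return_chunks=True):
        assert isinstance(chunk, StreamChunk)
        if chunk.type == "token":
            print(chunk.content, end="", flush=True)
        elif chunk.type == "tool_call":
            pass  # arguments are still accumulating; wait for "tool_calls"
        elif chunk.type == "tool_calls":
            print("\ncomplete tool calls:", chunk.tool_calls)
    await client.close()


asyncio.run(main())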