aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiecs has been flagged as potentially problematic; see the registry's advisory listing for more details.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +435 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3949 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1731 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +894 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +377 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +230 -37
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +328 -0
- aiecs/llm/clients/google_function_calling_mixin.py +415 -0
- aiecs/llm/clients/googleai_client.py +314 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +1186 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1464 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1016 -0
- aiecs/tools/docs/document_writer_tool.py +2008 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +220 -141
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
- aiecs-1.7.17.dist-info/RECORD +337 -0
- aiecs-1.7.17.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,415 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Google Function Calling Mixin
|
|
3
|
+
|
|
4
|
+
Provides shared implementation for Google providers (Vertex AI, Google AI)
|
|
5
|
+
that use FunctionDeclaration format for Function Calling.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
from typing import Dict, Any, Optional, List, Union, AsyncGenerator
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from vertexai.generative_models import (
|
|
13
|
+
FunctionDeclaration,
|
|
14
|
+
Tool,
|
|
15
|
+
)
|
|
16
|
+
from .base_client import LLMMessage, LLMResponse
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
# StreamChunk is shared with the OpenAI-compatible mixin so both provider
# families emit the same streaming chunk type; fall back to a local
# definition when that module cannot be imported.
try:
    from .openai_compatible_mixin import StreamChunk
except ImportError:
    # Fallback if not available
    @dataclass
    class StreamChunk:
        """Minimal local stand-in for the OpenAI mixin's StreamChunk."""
        type: str  # chunk discriminator, e.g. "token"
        content: Optional[str] = None
        tool_call: Optional[Dict[str, Any]] = None
        tool_calls: Optional[List[Dict[str, Any]]] = None
|
33
|
+
|
|
34
|
+
def _serialize_function_args(args) -> str:
    """
    Safely serialize function call arguments to JSON string.

    Handles MapComposite/protobuf objects from Vertex AI by converting
    them to regular dicts before JSON serialization. Nested protobuf
    containers (MapComposite/RepeatedComposite values inside the top-level
    mapping) are coerced via the json ``default=`` hook, so serialization
    does not raise TypeError on non-JSON-native nested values.

    Args:
        args: Function call arguments (may be MapComposite, dict, or other)

    Returns:
        JSON string representation of the arguments
    """
    if args is None:
        return "{}"

    # Handle MapComposite/protobuf objects (they have items() method)
    if hasattr(args, 'items'):
        # Convert to regular dict (top level only; nested composites are
        # handled by _coerce_jsonable below)
        args_dict = dict(args)
    elif isinstance(args, dict):
        args_dict = args
    else:
        # Try to convert to dict if possible
        try:
            args_dict = dict(args)
        except (TypeError, ValueError):
            # Last resort: use str() but this should rarely happen
            return str(args)

    return json.dumps(args_dict, ensure_ascii=False, default=_coerce_jsonable)


def _coerce_jsonable(value: Any) -> Any:
    """
    ``json.dumps`` default= hook for values json cannot natively encode.

    Map-like objects (e.g. protobuf MapComposite) become dicts,
    sequence-like ones (e.g. RepeatedComposite) become lists, and anything
    else is stringified as a last resort so serialization never raises.
    """
    if hasattr(value, 'items'):
        return dict(value)
    try:
        return list(value)
    except TypeError:
        return str(value)
|
|
67
|
+
class GoogleFunctionCallingMixin:
|
|
68
|
+
"""
|
|
69
|
+
Mixin class providing Google Function Calling implementation.
|
|
70
|
+
|
|
71
|
+
This mixin can be used by Google providers (Vertex AI, Google AI)
|
|
72
|
+
that use FunctionDeclaration format for Function Calling.
|
|
73
|
+
|
|
74
|
+
Usage:
|
|
75
|
+
class VertexAIClient(BaseLLMClient, GoogleFunctionCallingMixin):
|
|
76
|
+
async def generate_text(self, messages, tools=None, ...):
|
|
77
|
+
if tools:
|
|
78
|
+
vertex_tools = self._convert_openai_to_google_format(tools)
|
|
79
|
+
# Use in API call
|
|
80
|
+
"""
|
|
81
|
+
|
|
82
|
+
def _convert_openai_to_google_format(
|
|
83
|
+
self, tools: List[Dict[str, Any]]
|
|
84
|
+
) -> List[Tool]:
|
|
85
|
+
"""
|
|
86
|
+
Convert OpenAI tools format to Google FunctionDeclaration format.
|
|
87
|
+
|
|
88
|
+
Args:
|
|
89
|
+
tools: List of OpenAI-format tool dictionaries
|
|
90
|
+
|
|
91
|
+
Returns:
|
|
92
|
+
List of Google Tool objects containing FunctionDeclaration
|
|
93
|
+
"""
|
|
94
|
+
function_declarations = []
|
|
95
|
+
|
|
96
|
+
for tool in tools:
|
|
97
|
+
if tool.get("type") == "function":
|
|
98
|
+
func = tool.get("function", {})
|
|
99
|
+
func_name = func.get("name", "")
|
|
100
|
+
func_description = func.get("description", "")
|
|
101
|
+
func_parameters = func.get("parameters", {})
|
|
102
|
+
|
|
103
|
+
if not func_name:
|
|
104
|
+
logger.warning(f"Skipping tool without name: {tool}")
|
|
105
|
+
continue
|
|
106
|
+
|
|
107
|
+
# Create FunctionDeclaration with raw dict parameters
|
|
108
|
+
# Let Vertex SDK handle the schema conversion internally
|
|
109
|
+
function_declaration = FunctionDeclaration(
|
|
110
|
+
name=func_name,
|
|
111
|
+
description=func_description,
|
|
112
|
+
parameters=func_parameters,
|
|
113
|
+
)
|
|
114
|
+
|
|
115
|
+
function_declarations.append(function_declaration)
|
|
116
|
+
else:
|
|
117
|
+
logger.warning(f"Unsupported tool type: {tool.get('type')}")
|
|
118
|
+
|
|
119
|
+
# Wrap in Tool objects (Google format requires tools to be wrapped)
|
|
120
|
+
if function_declarations:
|
|
121
|
+
return [Tool(function_declarations=function_declarations)]
|
|
122
|
+
return []
|
|
123
|
+
|
|
124
|
+
def _extract_function_calls_from_google_response(
|
|
125
|
+
self, response: Any
|
|
126
|
+
) -> Optional[List[Dict[str, Any]]]:
|
|
127
|
+
"""
|
|
128
|
+
Extract function calls from Google Vertex AI response.
|
|
129
|
+
|
|
130
|
+
Args:
|
|
131
|
+
response: Response object from Google Vertex AI API
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
List of function call dictionaries in OpenAI-compatible format,
|
|
135
|
+
or None if no function calls found
|
|
136
|
+
"""
|
|
137
|
+
function_calls = []
|
|
138
|
+
|
|
139
|
+
# Check for function calls in response
|
|
140
|
+
# Google Vertex AI returns function calls in different places depending on API version
|
|
141
|
+
if hasattr(response, "candidates") and response.candidates:
|
|
142
|
+
candidate = response.candidates[0]
|
|
143
|
+
|
|
144
|
+
# Check for function_call attribute (older API)
|
|
145
|
+
if hasattr(candidate, "function_call") and candidate.function_call:
|
|
146
|
+
func_call = candidate.function_call
|
|
147
|
+
function_calls.append({
|
|
148
|
+
"id": f"call_{len(function_calls)}",
|
|
149
|
+
"type": "function",
|
|
150
|
+
"function": {
|
|
151
|
+
"name": func_call.name,
|
|
152
|
+
"arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
|
|
153
|
+
},
|
|
154
|
+
})
|
|
155
|
+
|
|
156
|
+
# Check for content.parts with function_call (newer API)
|
|
157
|
+
elif hasattr(candidate, "content") and hasattr(candidate.content, "parts"):
|
|
158
|
+
for part in candidate.content.parts:
|
|
159
|
+
if hasattr(part, "function_call") and part.function_call:
|
|
160
|
+
func_call = part.function_call
|
|
161
|
+
function_calls.append({
|
|
162
|
+
"id": f"call_{len(function_calls)}",
|
|
163
|
+
"type": "function",
|
|
164
|
+
"function": {
|
|
165
|
+
"name": func_call.name,
|
|
166
|
+
"arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
|
|
167
|
+
},
|
|
168
|
+
})
|
|
169
|
+
|
|
170
|
+
return function_calls if function_calls else None
|
|
171
|
+
|
|
172
|
+
def _attach_function_calls_to_response(
|
|
173
|
+
self,
|
|
174
|
+
response: LLMResponse,
|
|
175
|
+
function_calls: Optional[List[Dict[str, Any]]] = None,
|
|
176
|
+
) -> LLMResponse:
|
|
177
|
+
"""
|
|
178
|
+
Attach function call information to LLMResponse.
|
|
179
|
+
|
|
180
|
+
Args:
|
|
181
|
+
response: LLMResponse object
|
|
182
|
+
function_calls: List of function call dictionaries
|
|
183
|
+
|
|
184
|
+
Returns:
|
|
185
|
+
LLMResponse with function call info attached
|
|
186
|
+
"""
|
|
187
|
+
if function_calls:
|
|
188
|
+
setattr(response, "tool_calls", function_calls)
|
|
189
|
+
return response
|
|
190
|
+
|
|
191
|
+
def _convert_messages_to_google_format(
|
|
192
|
+
self, messages: List[LLMMessage]
|
|
193
|
+
) -> List[Dict[str, Any]]:
|
|
194
|
+
"""
|
|
195
|
+
Convert LLMMessage list to Google message format.
|
|
196
|
+
|
|
197
|
+
Args:
|
|
198
|
+
messages: List of LLMMessage objects
|
|
199
|
+
|
|
200
|
+
Returns:
|
|
201
|
+
List of Google-format message dictionaries
|
|
202
|
+
"""
|
|
203
|
+
google_messages = []
|
|
204
|
+
|
|
205
|
+
for msg in messages:
|
|
206
|
+
# Google format uses "role" and "parts" structure
|
|
207
|
+
parts = []
|
|
208
|
+
|
|
209
|
+
if msg.content:
|
|
210
|
+
parts.append({"text": msg.content})
|
|
211
|
+
|
|
212
|
+
# Handle tool responses (role="tool")
|
|
213
|
+
if msg.role == "tool" and msg.tool_call_id:
|
|
214
|
+
# Google format uses function_response
|
|
215
|
+
# Note: This may need adjustment based on actual API format
|
|
216
|
+
if msg.content:
|
|
217
|
+
parts.append({
|
|
218
|
+
"function_response": {
|
|
219
|
+
"name": msg.tool_call_id, # May need mapping
|
|
220
|
+
"response": {"result": msg.content},
|
|
221
|
+
}
|
|
222
|
+
})
|
|
223
|
+
|
|
224
|
+
if parts:
|
|
225
|
+
google_messages.append({
|
|
226
|
+
"role": msg.role if msg.role != "tool" else "model", # Adjust role mapping
|
|
227
|
+
"parts": parts,
|
|
228
|
+
})
|
|
229
|
+
|
|
230
|
+
return google_messages
|
|
231
|
+
|
|
232
|
+
def _extract_function_calls_from_google_chunk(
|
|
233
|
+
self, chunk: Any
|
|
234
|
+
) -> Optional[List[Dict[str, Any]]]:
|
|
235
|
+
"""
|
|
236
|
+
Extract function calls from Google Vertex AI streaming chunk.
|
|
237
|
+
|
|
238
|
+
Args:
|
|
239
|
+
chunk: Streaming chunk object from Google Vertex AI API
|
|
240
|
+
|
|
241
|
+
Returns:
|
|
242
|
+
List of function call dictionaries in OpenAI-compatible format,
|
|
243
|
+
or None if no function calls found
|
|
244
|
+
"""
|
|
245
|
+
function_calls = []
|
|
246
|
+
|
|
247
|
+
# Check for function calls in chunk
|
|
248
|
+
if hasattr(chunk, "candidates") and chunk.candidates:
|
|
249
|
+
candidate = chunk.candidates[0]
|
|
250
|
+
|
|
251
|
+
# Check for content.parts with function_call
|
|
252
|
+
if hasattr(candidate, "content") and hasattr(candidate.content, "parts"):
|
|
253
|
+
for part in candidate.content.parts:
|
|
254
|
+
if hasattr(part, "function_call") and part.function_call:
|
|
255
|
+
func_call = part.function_call
|
|
256
|
+
function_calls.append({
|
|
257
|
+
"id": f"call_{len(function_calls)}",
|
|
258
|
+
"type": "function",
|
|
259
|
+
"function": {
|
|
260
|
+
"name": func_call.name,
|
|
261
|
+
"arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
|
|
262
|
+
},
|
|
263
|
+
})
|
|
264
|
+
|
|
265
|
+
# Check for function_call attribute directly on candidate
|
|
266
|
+
elif hasattr(candidate, "function_call") and candidate.function_call:
|
|
267
|
+
func_call = candidate.function_call
|
|
268
|
+
function_calls.append({
|
|
269
|
+
"id": f"call_{len(function_calls)}",
|
|
270
|
+
"type": "function",
|
|
271
|
+
"function": {
|
|
272
|
+
"name": func_call.name,
|
|
273
|
+
"arguments": _serialize_function_args(func_call.args) if hasattr(func_call, "args") else "{}",
|
|
274
|
+
},
|
|
275
|
+
})
|
|
276
|
+
|
|
277
|
+
return function_calls if function_calls else None
|
|
278
|
+
|
|
279
|
+
async def _stream_text_with_function_calling(
    self,
    model_instance: Any,
    contents: Any,
    generation_config: Any,
    safety_settings: List[Any],
    tools: Optional[List[Tool]] = None,
    return_chunks: bool = False,
    **kwargs,
) -> AsyncGenerator[Union[str, StreamChunk], None]:
    """
    Stream text with Function Calling support (Google Vertex AI format).

    Args:
        model_instance: GenerativeModel instance (should include system_instruction)
        contents: Input contents (string or list of Content objects)
        generation_config: GenerationConfig object
        safety_settings: List of SafetySetting objects
        tools: List of Tool objects (Google format)
        return_chunks: If True, returns StreamChunk objects; if False, returns str tokens only
        **kwargs: Additional arguments passed through to generate_content

    Yields:
        str or StreamChunk: Text tokens, and (when return_chunks=True) incremental
        "tool_call" chunks plus one final "tool_calls" chunk with the accumulated calls.

    Raises:
        SafetyBlockError: If the prompt or the response is blocked by safety filters.
    """
    # Build API call parameters
    api_params = {
        "contents": contents,
        "generation_config": generation_config,
        "safety_settings": safety_settings,
        "stream": True,
    }

    # Add tools if available
    if tools:
        api_params["tools"] = tools

    # Add any additional kwargs
    api_params.update(kwargs)

    # Get streaming response.
    # NOTE(review): this SDK call is synchronous and runs on the event loop
    # thread; consider asyncio.to_thread if it proves to block — confirm
    # against the SDK's streaming behavior before changing.
    stream_response = model_instance.generate_content(**api_params)

    # Accumulator for tool calls, keyed by call id
    tool_calls_accumulator: Dict[str, Dict[str, Any]] = {}

    # Stream chunks
    import asyncio

    first_chunk_checked = False

    for chunk in stream_response:
        # Yield control to the event loop between synchronous SDK iterations
        await asyncio.sleep(0)

        # Check for prompt-level safety blocks (only meaningful on the first
        # chunk that carries prompt_feedback)
        if not first_chunk_checked and hasattr(chunk, "prompt_feedback"):
            pf = chunk.prompt_feedback
            if hasattr(pf, "block_reason") and pf.block_reason:
                block_reason = str(pf.block_reason)
                if block_reason not in ["BLOCKED_REASON_UNSPECIFIED", "OTHER"]:
                    from .base_client import SafetyBlockError
                    raise SafetyBlockError(
                        "Prompt blocked by safety filters",
                        block_reason=block_reason,
                        block_type="prompt",
                    )
            first_chunk_checked = True

        # Extract text content and function calls
        if hasattr(chunk, "candidates") and chunk.candidates:
            candidate = chunk.candidates[0]

            # Check for safety blocks in response.
            # finish_reason may be a proto enum (e.g. FinishReason.SAFETY) or a
            # plain string depending on SDK version; normalize to its name so
            # the comparison works in both cases (the prompt_feedback branch
            # above already stringifies block_reason for the same reason).
            if hasattr(candidate, "finish_reason"):
                finish_reason = candidate.finish_reason
                finish_reason_name = (
                    finish_reason.name
                    if hasattr(finish_reason, "name")
                    else str(finish_reason)
                )
                if finish_reason_name in ("SAFETY", "RECITATION"):
                    from .base_client import SafetyBlockError
                    raise SafetyBlockError(
                        "Response blocked by safety filters",
                        block_type="response",
                    )

            # Extract text from chunk parts
            if hasattr(candidate, "content") and hasattr(candidate.content, "parts"):
                for part in candidate.content.parts:
                    if hasattr(part, "text") and part.text:
                        text_content = part.text
                        if return_chunks:
                            yield StreamChunk(type="token", content=text_content)
                        else:
                            yield text_content

            # Also check if text is directly available.
            # NOTE(review): hasattr only swallows AttributeError; if the SDK's
            # candidate.text property raises ValueError for non-text parts this
            # would propagate — confirm against the installed SDK version.
            elif hasattr(candidate, "text") and candidate.text:
                text_content = candidate.text
                if return_chunks:
                    yield StreamChunk(type="token", content=text_content)
                else:
                    yield text_content

            # Extract and accumulate function calls
            function_calls = self._extract_function_calls_from_google_chunk(chunk)
            if function_calls:
                for func_call in function_calls:
                    call_id = func_call["id"]

                    # Initialize accumulator if needed
                    if call_id not in tool_calls_accumulator:
                        tool_calls_accumulator[call_id] = func_call.copy()
                    else:
                        # Update accumulator (merge arguments if needed)
                        existing_call = tool_calls_accumulator[call_id]
                        if func_call["function"]["name"]:
                            existing_call["function"]["name"] = func_call["function"]["name"]
                        if func_call["function"]["arguments"]:
                            # Google may send partial arguments across chunks;
                            # later non-empty payloads replace earlier ones
                            # (a proper JSON merge may be needed eventually).
                            new_args = func_call["function"]["arguments"]
                            if new_args and new_args != "{}":
                                existing_call["function"]["arguments"] = new_args

                    # Yield tool call update if return_chunks=True
                    if return_chunks:
                        yield StreamChunk(
                            type="tool_call",
                            tool_call=tool_calls_accumulator[call_id].copy(),
                        )

    # At the end of stream, yield complete tool_calls if any
    if tool_calls_accumulator and return_chunks:
        complete_tool_calls = list(tool_calls_accumulator.values())
        yield StreamChunk(
            type="tool_calls",
            tool_calls=complete_tool_calls,
        )
|
|
415
|
+
|