ag2 0.9.6__py3-none-any.whl → 0.9.8.post1__py3-none-any.whl
This diff compares the published contents of the ag2 package, versions 0.9.6 and 0.9.8.post1, as they appear in their public registry. It is provided for informational purposes only.
- {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/METADATA +102 -75
- ag2-0.9.8.post1.dist-info/RECORD +387 -0
- autogen/__init__.py +1 -2
- autogen/_website/generate_api_references.py +4 -5
- autogen/_website/generate_mkdocs.py +9 -15
- autogen/_website/notebook_processor.py +13 -14
- autogen/_website/process_notebooks.py +10 -10
- autogen/_website/utils.py +5 -4
- autogen/agentchat/agent.py +13 -13
- autogen/agentchat/assistant_agent.py +7 -6
- autogen/agentchat/contrib/agent_eval/agent_eval.py +3 -3
- autogen/agentchat/contrib/agent_eval/critic_agent.py +3 -3
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +3 -3
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +3 -3
- autogen/agentchat/contrib/agent_optimizer.py +3 -3
- autogen/agentchat/contrib/capabilities/generate_images.py +11 -11
- autogen/agentchat/contrib/capabilities/teachability.py +15 -15
- autogen/agentchat/contrib/capabilities/transforms.py +17 -18
- autogen/agentchat/contrib/capabilities/transforms_util.py +5 -5
- autogen/agentchat/contrib/capabilities/vision_capability.py +4 -3
- autogen/agentchat/contrib/captainagent/agent_builder.py +30 -30
- autogen/agentchat/contrib/captainagent/captainagent.py +22 -21
- autogen/agentchat/contrib/captainagent/tool_retriever.py +2 -3
- autogen/agentchat/contrib/gpt_assistant_agent.py +9 -9
- autogen/agentchat/contrib/graph_rag/document.py +3 -3
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +3 -3
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +3 -3
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +5 -11
- autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +7 -7
- autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +6 -6
- autogen/agentchat/contrib/img_utils.py +1 -1
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +11 -11
- autogen/agentchat/contrib/llava_agent.py +18 -4
- autogen/agentchat/contrib/math_user_proxy_agent.py +11 -11
- autogen/agentchat/contrib/multimodal_conversable_agent.py +8 -8
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +6 -5
- autogen/agentchat/contrib/rag/chromadb_query_engine.py +22 -26
- autogen/agentchat/contrib/rag/llamaindex_query_engine.py +14 -17
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +27 -37
- autogen/agentchat/contrib/rag/query_engine.py +7 -5
- autogen/agentchat/contrib/retrieve_assistant_agent.py +5 -5
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +8 -7
- autogen/agentchat/contrib/society_of_mind_agent.py +15 -14
- autogen/agentchat/contrib/swarm_agent.py +76 -98
- autogen/agentchat/contrib/text_analyzer_agent.py +7 -7
- autogen/agentchat/contrib/vectordb/base.py +10 -18
- autogen/agentchat/contrib/vectordb/chromadb.py +2 -1
- autogen/agentchat/contrib/vectordb/couchbase.py +18 -20
- autogen/agentchat/contrib/vectordb/mongodb.py +6 -5
- autogen/agentchat/contrib/vectordb/pgvectordb.py +40 -41
- autogen/agentchat/contrib/vectordb/qdrant.py +5 -5
- autogen/agentchat/contrib/web_surfer.py +20 -19
- autogen/agentchat/conversable_agent.py +311 -295
- autogen/agentchat/group/context_str.py +1 -3
- autogen/agentchat/group/context_variables.py +15 -25
- autogen/agentchat/group/group_tool_executor.py +10 -10
- autogen/agentchat/group/group_utils.py +15 -15
- autogen/agentchat/group/guardrails.py +7 -7
- autogen/agentchat/group/handoffs.py +19 -36
- autogen/agentchat/group/multi_agent_chat.py +7 -7
- autogen/agentchat/group/on_condition.py +4 -7
- autogen/agentchat/group/on_context_condition.py +4 -7
- autogen/agentchat/group/patterns/auto.py +8 -7
- autogen/agentchat/group/patterns/manual.py +7 -6
- autogen/agentchat/group/patterns/pattern.py +13 -12
- autogen/agentchat/group/patterns/random.py +3 -3
- autogen/agentchat/group/patterns/round_robin.py +3 -3
- autogen/agentchat/group/reply_result.py +2 -4
- autogen/agentchat/group/speaker_selection_result.py +5 -5
- autogen/agentchat/group/targets/group_chat_target.py +7 -6
- autogen/agentchat/group/targets/group_manager_target.py +4 -4
- autogen/agentchat/group/targets/transition_target.py +2 -1
- autogen/agentchat/groupchat.py +58 -61
- autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +4 -4
- autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +4 -4
- autogen/agentchat/realtime/experimental/clients/gemini/client.py +7 -7
- autogen/agentchat/realtime/experimental/clients/oai/base_client.py +8 -8
- autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +6 -6
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +10 -9
- autogen/agentchat/realtime/experimental/realtime_agent.py +10 -9
- autogen/agentchat/realtime/experimental/realtime_observer.py +3 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +44 -44
- autogen/agentchat/user_proxy_agent.py +10 -9
- autogen/agentchat/utils.py +3 -3
- autogen/agents/contrib/time/time_reply_agent.py +6 -5
- autogen/agents/contrib/time/time_tool_agent.py +2 -1
- autogen/agents/experimental/deep_research/deep_research.py +3 -3
- autogen/agents/experimental/discord/discord.py +2 -2
- autogen/agents/experimental/document_agent/chroma_query_engine.py +29 -44
- autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +9 -14
- autogen/agents/experimental/document_agent/document_agent.py +15 -16
- autogen/agents/experimental/document_agent/document_conditions.py +3 -3
- autogen/agents/experimental/document_agent/document_utils.py +5 -9
- autogen/agents/experimental/document_agent/inmemory_query_engine.py +14 -20
- autogen/agents/experimental/document_agent/parser_utils.py +4 -4
- autogen/agents/experimental/document_agent/url_utils.py +14 -23
- autogen/agents/experimental/reasoning/reasoning_agent.py +33 -33
- autogen/agents/experimental/slack/slack.py +2 -2
- autogen/agents/experimental/telegram/telegram.py +2 -3
- autogen/agents/experimental/websurfer/websurfer.py +4 -4
- autogen/agents/experimental/wikipedia/wikipedia.py +5 -7
- autogen/browser_utils.py +8 -8
- autogen/cache/abstract_cache_base.py +5 -5
- autogen/cache/cache.py +12 -12
- autogen/cache/cache_factory.py +4 -4
- autogen/cache/cosmos_db_cache.py +9 -9
- autogen/cache/disk_cache.py +6 -6
- autogen/cache/in_memory_cache.py +4 -4
- autogen/cache/redis_cache.py +4 -4
- autogen/code_utils.py +18 -18
- autogen/coding/base.py +6 -6
- autogen/coding/docker_commandline_code_executor.py +9 -9
- autogen/coding/func_with_reqs.py +7 -6
- autogen/coding/jupyter/base.py +3 -3
- autogen/coding/jupyter/docker_jupyter_server.py +3 -4
- autogen/coding/jupyter/import_utils.py +3 -3
- autogen/coding/jupyter/jupyter_client.py +5 -5
- autogen/coding/jupyter/jupyter_code_executor.py +3 -4
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +8 -7
- autogen/coding/markdown_code_extractor.py +1 -2
- autogen/coding/utils.py +1 -2
- autogen/doc_utils.py +3 -2
- autogen/environments/docker_python_environment.py +19 -29
- autogen/environments/python_environment.py +8 -17
- autogen/environments/system_python_environment.py +3 -4
- autogen/environments/venv_python_environment.py +8 -12
- autogen/environments/working_directory.py +1 -2
- autogen/events/agent_events.py +106 -109
- autogen/events/base_event.py +6 -5
- autogen/events/client_events.py +15 -14
- autogen/events/helpers.py +1 -1
- autogen/events/print_event.py +4 -5
- autogen/fast_depends/_compat.py +10 -15
- autogen/fast_depends/core/build.py +17 -36
- autogen/fast_depends/core/model.py +64 -113
- autogen/fast_depends/dependencies/model.py +2 -1
- autogen/fast_depends/dependencies/provider.py +3 -2
- autogen/fast_depends/library/model.py +4 -4
- autogen/fast_depends/schema.py +7 -7
- autogen/fast_depends/use.py +17 -25
- autogen/fast_depends/utils.py +10 -30
- autogen/formatting_utils.py +6 -6
- autogen/graph_utils.py +1 -4
- autogen/import_utils.py +13 -13
- autogen/interop/crewai/crewai.py +2 -2
- autogen/interop/interoperable.py +2 -2
- autogen/interop/langchain/langchain_chat_model_factory.py +3 -2
- autogen/interop/langchain/langchain_tool.py +2 -6
- autogen/interop/litellm/litellm_config_factory.py +6 -7
- autogen/interop/pydantic_ai/pydantic_ai.py +4 -7
- autogen/interop/registry.py +2 -1
- autogen/io/base.py +5 -5
- autogen/io/run_response.py +33 -32
- autogen/io/websockets.py +6 -5
- autogen/json_utils.py +1 -2
- autogen/llm_config/__init__.py +11 -0
- autogen/llm_config/client.py +58 -0
- autogen/llm_config/config.py +384 -0
- autogen/llm_config/entry.py +154 -0
- autogen/logger/base_logger.py +4 -3
- autogen/logger/file_logger.py +2 -1
- autogen/logger/logger_factory.py +2 -2
- autogen/logger/logger_utils.py +2 -2
- autogen/logger/sqlite_logger.py +3 -2
- autogen/math_utils.py +4 -5
- autogen/mcp/__main__.py +6 -6
- autogen/mcp/helpers.py +4 -4
- autogen/mcp/mcp_client.py +170 -29
- autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +3 -4
- autogen/mcp/mcp_proxy/mcp_proxy.py +23 -26
- autogen/mcp/mcp_proxy/operation_grouping.py +4 -5
- autogen/mcp/mcp_proxy/operation_renaming.py +6 -10
- autogen/mcp/mcp_proxy/security.py +2 -3
- autogen/messages/agent_messages.py +96 -98
- autogen/messages/base_message.py +6 -5
- autogen/messages/client_messages.py +15 -14
- autogen/messages/print_message.py +4 -5
- autogen/oai/__init__.py +1 -2
- autogen/oai/anthropic.py +42 -41
- autogen/oai/bedrock.py +68 -57
- autogen/oai/cerebras.py +26 -25
- autogen/oai/client.py +118 -138
- autogen/oai/client_utils.py +3 -3
- autogen/oai/cohere.py +34 -11
- autogen/oai/gemini.py +40 -17
- autogen/oai/gemini_types.py +11 -12
- autogen/oai/groq.py +22 -10
- autogen/oai/mistral.py +17 -11
- autogen/oai/oai_models/__init__.py +14 -2
- autogen/oai/oai_models/_models.py +2 -2
- autogen/oai/oai_models/chat_completion.py +13 -14
- autogen/oai/oai_models/chat_completion_message.py +11 -9
- autogen/oai/oai_models/chat_completion_message_tool_call.py +26 -3
- autogen/oai/oai_models/chat_completion_token_logprob.py +3 -4
- autogen/oai/oai_models/completion_usage.py +8 -9
- autogen/oai/ollama.py +22 -10
- autogen/oai/openai_responses.py +40 -17
- autogen/oai/openai_utils.py +159 -85
- autogen/oai/together.py +29 -14
- autogen/retrieve_utils.py +6 -7
- autogen/runtime_logging.py +5 -4
- autogen/token_count_utils.py +7 -4
- autogen/tools/contrib/time/time.py +0 -1
- autogen/tools/dependency_injection.py +5 -6
- autogen/tools/experimental/browser_use/browser_use.py +10 -10
- autogen/tools/experimental/code_execution/python_code_execution.py +5 -7
- autogen/tools/experimental/crawl4ai/crawl4ai.py +12 -15
- autogen/tools/experimental/deep_research/deep_research.py +9 -8
- autogen/tools/experimental/duckduckgo/duckduckgo_search.py +5 -11
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +98 -115
- autogen/tools/experimental/google/authentication/credentials_local_provider.py +1 -1
- autogen/tools/experimental/google/drive/drive_functions.py +4 -4
- autogen/tools/experimental/google/drive/toolkit.py +5 -5
- autogen/tools/experimental/google_search/google_search.py +5 -5
- autogen/tools/experimental/google_search/youtube_search.py +5 -5
- autogen/tools/experimental/messageplatform/discord/discord.py +8 -12
- autogen/tools/experimental/messageplatform/slack/slack.py +14 -20
- autogen/tools/experimental/messageplatform/telegram/telegram.py +8 -12
- autogen/tools/experimental/perplexity/perplexity_search.py +18 -29
- autogen/tools/experimental/reliable/reliable.py +68 -74
- autogen/tools/experimental/searxng/searxng_search.py +20 -19
- autogen/tools/experimental/tavily/tavily_search.py +12 -19
- autogen/tools/experimental/web_search_preview/web_search_preview.py +13 -7
- autogen/tools/experimental/wikipedia/wikipedia.py +7 -10
- autogen/tools/function_utils.py +7 -7
- autogen/tools/tool.py +6 -5
- autogen/types.py +2 -2
- autogen/version.py +1 -1
- ag2-0.9.6.dist-info/RECORD +0 -421
- autogen/llm_config.py +0 -385
- {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/WHEEL +0 -0
- {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/licenses/NOTICE.md +0 -0
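The most structural change in the listing is the llm_config refactor: the single module autogen/llm_config.py (385 lines removed) becomes the autogen/llm_config/ package (config.py, entry.py, client.py, plus an __init__.py). A minimal sketch of what stays stable for callers, assuming the new __init__.py re-exports the public names; the hunks below still import LLMConfig from the autogen.llm_config path, which supports that assumption:

    # Sketch: the import path survives the module-to-package split, assuming
    # autogen/llm_config/__init__.py re-exports LLMConfig.
    from autogen.llm_config import LLMConfig

    # Constructor arguments are illustrative placeholders, not taken from this diff.
    llm_config = LLMConfig(api_type="openai", model="gpt-4o-mini")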
--- a/autogen/agentchat/contrib/capabilities/transforms.py
+++ b/autogen/agentchat/contrib/capabilities/transforms.py
@@ -6,7 +6,7 @@
 # SPDX-License-Identifier: MIT
 import copy
 import sys
-from typing import Any,
+from typing import Any, Protocol

 import tiktoken
 from termcolor import colored
@@ -62,9 +62,9 @@ class MessageHistoryLimiter:

     def __init__(
         self,
-        max_messages:
+        max_messages: int | None = None,
         keep_first_message: bool = False,
-        exclude_names:
+        exclude_names: list[str] | None = None,
     ):
         """Args:
             max_messages Optional[int]: Maximum number of messages to keep in the context. Must be greater than 0 if not None.
@@ -91,7 +91,6 @@ class MessageHistoryLimiter:
         Returns:
             List[Dict]: A new list containing the most recent messages up to the specified maximum.
         """
-
         exclude_names = getattr(self, "_exclude_names", None)

         filtered = [msg for msg in messages if msg.get("name") not in exclude_names] if exclude_names else messages
@@ -136,7 +135,7 @@ class MessageHistoryLimiter:
             return logs_str, True
         return "No messages were removed.", False

-    def _validate_max_messages(self, max_messages:
+    def _validate_max_messages(self, max_messages: int | None):
         if max_messages is not None and max_messages < 1:
             raise ValueError("max_messages must be None or greater than 1")

@@ -171,11 +170,11 @@ class MessageTokenLimiter:

     def __init__(
         self,
-        max_tokens_per_message:
-        max_tokens:
-        min_tokens:
+        max_tokens_per_message: int | None = None,
+        max_tokens: int | None = None,
+        min_tokens: int | None = None,
         model: str = "gpt-3.5-turbo-0613",
-        filter_dict:
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
@@ -268,7 +267,7 @@ class MessageTokenLimiter:
             return logs_str, True
         return "No tokens were truncated.", False

-    def _truncate_str_to_tokens(self, contents:
+    def _truncate_str_to_tokens(self, contents: str | list, n_tokens: int) -> str | list:
         if isinstance(contents, str):
             return self._truncate_tokens(contents, n_tokens)
         elif isinstance(contents, list):
@@ -296,7 +295,7 @@ class MessageTokenLimiter:

         return truncated_text

-    def _validate_max_tokens(self, max_tokens:
+    def _validate_max_tokens(self, max_tokens: int | None = None) -> int | None:
         if max_tokens is not None and max_tokens < 0:
             raise ValueError("max_tokens and max_tokens_per_message must be None or greater than or equal to 0")

@@ -317,7 +316,7 @@ class MessageTokenLimiter:

         return max_tokens if max_tokens is not None else sys.maxsize

-    def _validate_min_tokens(self, min_tokens:
+    def _validate_min_tokens(self, min_tokens: int | None, max_tokens: int | None) -> int:
         if min_tokens is None:
             return 0
         if min_tokens < 0:
@@ -336,11 +335,11 @@ class TextMessageCompressor:

     def __init__(
         self,
-        text_compressor:
-        min_tokens:
+        text_compressor: TextCompressor | None = None,
+        min_tokens: int | None = None,
         compression_params: dict = dict(),
-        cache:
-        filter_dict:
+        cache: AbstractCache | None = None,
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
@@ -466,7 +465,7 @@ class TextMessageCompressor:

         return compressed_text["compressed_prompt"], savings

-    def _validate_min_tokens(self, min_tokens:
+    def _validate_min_tokens(self, min_tokens: int | None):
         if min_tokens is not None and min_tokens <= 0:
             raise ValueError("min_tokens must be greater than 0 or None")

@@ -497,7 +496,7 @@ class TextMessageContentName:
         position: str = "start",
         format_string: str = "{name}:\n",
         deduplicate: bool = True,
-        filter_dict:
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
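The hunks above only modernize annotations; limiter behavior is unchanged. A short usage sketch under the new signatures, assuming the transforms' usual apply_transform entry point (which is not itself shown in this diff):

    from autogen.agentchat.contrib.capabilities.transforms import (
        MessageHistoryLimiter,
        MessageTokenLimiter,
    )

    # Keep the ten most recent messages, never dropping the first one.
    history_limiter = MessageHistoryLimiter(max_messages=10, keep_first_message=True)

    # Cap each message at 1000 tokens, counted with the named model's tokenizer.
    token_limiter = MessageTokenLimiter(max_tokens_per_message=1000, model="gpt-3.5-turbo-0613")

    messages = [{"role": "user", "content": "a very long message " * 500}]
    messages = history_limiter.apply_transform(messages)
    messages = token_limiter.apply_transform(messages)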
--- a/autogen/agentchat/contrib/capabilities/transforms_util.py
+++ b/autogen/agentchat/contrib/capabilities/transforms_util.py
@@ -5,7 +5,7 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 from collections.abc import Hashable
-from typing import Any
+from typing import Any

 from .... import token_count_utils
 from ....cache.abstract_cache_base import AbstractCache
@@ -24,7 +24,7 @@ def cache_key(content: MessageContentType, *args: Hashable) -> str:
     return "".join(str_keys)


-def cache_content_get(cache:
+def cache_content_get(cache: AbstractCache | None, key: str) -> tuple[MessageContentType, ...] | None:
     """Retrieves cached content from the cache.

     Args:
@@ -37,7 +37,7 @@ def cache_content_get(cache: Optional[AbstractCache], key: str) -> Optional[tupl
     return cached_value


-def cache_content_set(cache:
+def cache_content_set(cache: AbstractCache | None, key: str, content: MessageContentType, *extra_values):
     """Sets content into the cache.

     Args:
@@ -51,7 +51,7 @@ def cache_content_set(cache: Optional[AbstractCache], key: str, content: Message
     cache.set(key, cache_value)


-def min_tokens_reached(messages: list[dict[str, Any]], min_tokens:
+def min_tokens_reached(messages: list[dict[str, Any]], min_tokens: int | None) -> bool:
     """Returns True if the total number of tokens in the messages is greater than or equal to the specified value.

     Args:
@@ -108,7 +108,7 @@ def is_content_text_empty(content: MessageContentType) -> bool:
     return True


-def should_transform_message(message: dict[str, Any], filter_dict:
+def should_transform_message(message: dict[str, Any], filter_dict: dict[str, Any] | None, exclude: bool) -> bool:
     """Validates whether the transform should be applied according to the filter dictionary.

     Args:
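Every change in this file is the same mechanical rewrite applied across the release: typing.Optional and typing.Union become PEP 604 unions, and (in later files) Callable moves to collections.abc. On Python 3.10+ the two spellings are interchangeable at runtime, as this stdlib-only sketch shows:

    from typing import Optional, Union

    def old_style(cache: Optional[int], key: Union[str, bytes]) -> None: ...
    def new_style(cache: int | None, key: str | bytes) -> None: ...

    # PEP 604 unions compare equal to their typing-module spellings (3.10+).
    assert (int | None) == Optional[int]
    assert (str | bytes) == Union[str, bytes]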
--- a/autogen/agentchat/contrib/capabilities/vision_capability.py
+++ b/autogen/agentchat/contrib/capabilities/vision_capability.py
@@ -5,7 +5,8 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 import copy
-from
+from collections.abc import Callable
+from typing import Any

 from ....code_utils import content_str
 from ....oai.client import OpenAIWrapper
@@ -47,7 +48,7 @@ class VisionCapability(AgentCapability):
     def __init__(
         self,
         lmm_config: dict[str, Any],
-        description_prompt:
+        description_prompt: str | None = DEFAULT_DESCRIPTION_PROMPT,
         custom_caption_func: Callable = None,
     ) -> None:
         """Initializes a new instance, setting up the configuration for interacting with
@@ -101,7 +102,7 @@ class VisionCapability(AgentCapability):
         # Register a hook for processing the last message.
         agent.register_hook(hookable_method="process_last_received_message", hook=self.process_last_received_message)

-    def process_last_received_message(self, content:
+    def process_last_received_message(self, content: str | list[dict[str, Any]]) -> str:
         """Processes the last received message content by normalizing and augmenting it
         with descriptions of any included images. The function supports input content
         as either a string or a list of dictionaries, where each dictionary represents
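For context, a hedged sketch of how this capability is attached to an agent; add_to_agent is the usual AgentCapability entry point (assumed here, not part of this diff) and ends up registering the hook quoted in the hunk above:

    from autogen import ConversableAgent
    from autogen.agentchat.contrib.capabilities.vision_capability import VisionCapability

    agent = ConversableAgent(name="describer", llm_config=False)

    # lmm_config points at a multimodal model; the config values are placeholders.
    vision = VisionCapability(lmm_config={"config_list": [{"model": "gpt-4o"}]})
    vision.add_to_agent(agent)  # registers the process_last_received_message hook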
--- a/autogen/agentchat/contrib/captainagent/agent_builder.py
+++ b/autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -11,7 +11,7 @@ import logging
 import re
 import subprocess as sp
 import time
-from typing import Any
+from typing import Any

 from termcolor import colored

@@ -184,14 +184,14 @@ Match roles in the role set to each expert in expert set.

     def __init__(
         self,
-        config_file_or_env:
-        config_file_location:
-        llm_config:
-        builder_model:
-        agent_model:
-        builder_model_tags:
-        agent_model_tags:
-        max_agents:
+        config_file_or_env: str | None = "OAI_CONFIG_LIST",
+        config_file_location: str | None = "",
+        llm_config: LLMConfig | dict[str, Any] | None = None,
+        builder_model: str | list | None = [],
+        agent_model: str | list | None = [],
+        builder_model_tags: list | None = [],
+        agent_model_tags: list | None = [],
+        max_agents: int | None = 5,
     ):
         """(These APIs are experimental and may change in the future.)

@@ -259,8 +259,8 @@ Match roles in the role set to each expert in expert set.
         self,
         agent_config: dict[str, Any],
         member_name: list[str],
-        llm_config:
-        use_oai_assistant:
+        llm_config: LLMConfig | dict[str, Any],
+        use_oai_assistant: bool | None = False,
     ) -> AssistantAgent:
         """Create a group chat participant agent.

@@ -357,7 +357,7 @@ Match roles in the role set to each expert in expert set.
         self.agent_procs_assign[agent_name] = (agent, server_id)
         return agent

-    def clear_agent(self, agent_name: str, recycle_endpoint:
+    def clear_agent(self, agent_name: str, recycle_endpoint: bool | None = True):
         """Clear a specific agent by name.

         Args:
@@ -378,7 +378,7 @@ Match roles in the role set to each expert in expert set.
         self.open_ports.append(server_id.split("_")[-1])
         print(colored(f"Agent {agent_name} has been cleared.", "yellow"), flush=True)

-    def clear_all_agents(self, recycle_endpoint:
+    def clear_all_agents(self, recycle_endpoint: bool | None = True):
         """Clear all cached agents."""
         for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
             self.clear_agent(agent_name, recycle_endpoint)
@@ -387,12 +387,12 @@ Match roles in the role set to each expert in expert set.
     def build(
         self,
         building_task: str,
-        default_llm_config:
-        coding:
-        code_execution_config:
-        use_oai_assistant:
-        user_proxy:
-        max_agents:
+        default_llm_config: LLMConfig | dict[str, Any],
+        coding: bool | None = None,
+        code_execution_config: dict[str, Any] | None = None,
+        use_oai_assistant: bool | None = False,
+        user_proxy: ConversableAgent | None = None,
+        max_agents: int | None = None,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Auto build agents based on the building task.
@@ -515,13 +515,13 @@ Match roles in the role set to each expert in expert set.
         self,
         building_task: str,
         library_path_or_json: str,
-        default_llm_config:
+        default_llm_config: LLMConfig | dict[str, Any],
         top_k: int = 3,
-        coding:
-        code_execution_config:
-        use_oai_assistant:
-        embedding_model:
-        user_proxy:
+        coding: bool | None = None,
+        code_execution_config: dict[str, Any] | None = None,
+        use_oai_assistant: bool | None = False,
+        embedding_model: str | None = "all-mpnet-base-v2",
+        user_proxy: ConversableAgent | None = None,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Build agents from a library.
@@ -668,7 +668,7 @@ Match roles in the role set to each expert in expert set.
         return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)

     def _build_agents(
-        self, use_oai_assistant:
+        self, use_oai_assistant: bool | None = False, user_proxy: ConversableAgent | None = None, **kwargs
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Build agents with generated configs.

@@ -711,7 +711,7 @@ Match roles in the role set to each expert in expert set.

         return agent_list, self.cached_configs.copy()

-    def save(self, filepath:
+    def save(self, filepath: str | None = None) -> str:
         """Save building configs. If the filepath is not specific, this function will create a filename by encrypt the
         building_task string by md5 with "save_config_" prefix, and save config to the local path.

@@ -731,9 +731,9 @@ Match roles in the role set to each expert in expert set.

     def load(
         self,
-        filepath:
-        config_json:
-        use_oai_assistant:
+        filepath: str | None = None,
+        config_json: str | None = None,
+        use_oai_assistant: bool | None = False,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Load building configs and call the build function to complete building without calling online LLMs' api.
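A usage sketch of AgentBuilder under the retyped signatures above; the import path is inferred from the file layout in the listing, and the model names are placeholders:

    from autogen.agentchat.contrib.captainagent import AgentBuilder

    builder = AgentBuilder(
        config_file_or_env="OAI_CONFIG_LIST",  # default made explicit above
        builder_model="gpt-4o",
        agent_model="gpt-4o",
        max_agents=5,
    )

    agent_list, agent_configs = builder.build(
        building_task="Find a recent paper about GPT-4 and discuss its applications",
        default_llm_config={"temperature": 0},
    )
    builder.save("./save_config.json")  # save() is now typed to return the path as str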
--- a/autogen/agentchat/contrib/captainagent/captainagent.py
+++ b/autogen/agentchat/contrib/captainagent/captainagent.py
@@ -4,7 +4,8 @@
 import hashlib
 import json
 import os
-from
+from collections.abc import Callable
+from typing import Any, Literal

 from termcolor import colored

@@ -135,17 +136,17 @@ Note that the previous experts will forget everything after you obtain the respo
     def __init__(
         self,
         name: str,
-        system_message:
-        llm_config:
-        is_termination_msg:
-        max_consecutive_auto_reply:
-        human_input_mode:
-        code_execution_config:
-        nested_config:
-        agent_lib:
-        tool_lib:
-        agent_config_save_path:
-        description:
+        system_message: str | None = None,
+        llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = None,
+        is_termination_msg: Callable[[dict[str, Any]], bool] | None = None,
+        max_consecutive_auto_reply: int | None = None,
+        human_input_mode: str | None = "NEVER",
+        code_execution_config: dict[str, Any] | Literal[False] | None = False,
+        nested_config: dict[str, Any] | None = None,
+        agent_lib: str | None = None,
+        tool_lib: str | None = None,
+        agent_config_save_path: str | None = None,
+        description: str | None = DEFAULT_DESCRIPTION,
         **kwargs: Any,
     ):
         """Args:
@@ -227,7 +228,7 @@ Note that the previous experts will forget everything after you obtain the respo
         )

     @staticmethod
-    def _update_config(default_dict: dict[str, Any], update_dict:
+    def _update_config(default_dict: dict[str, Any], update_dict: dict[str, Any] | None) -> dict[str, Any]:
         """Recursively updates the default_dict with values from update_dict."""
         if update_dict is None:
             return default_dict
@@ -297,14 +298,14 @@ Collect information from the general task, follow the suggestions from manager t
         name: str,
         nested_config: dict[str, Any],
         agent_config_save_path: str = None,
-        is_termination_msg:
-        max_consecutive_auto_reply:
-        human_input_mode:
-        code_execution_config:
-        default_auto_reply:
-        llm_config:
-        system_message:
-        description:
+        is_termination_msg: Callable[[dict[str, Any]], bool] | None = None,
+        max_consecutive_auto_reply: int | None = None,
+        human_input_mode: str | None = "NEVER",
+        code_execution_config: dict[str, Any] | Literal[False] | None = None,
+        default_auto_reply: str | dict[str, Any] | None = DEFAULT_AUTO_REPLY,
+        llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = False,
+        system_message: str | list | None = "",
+        description: str | None = None,
     ):
         """Args:
             name (str): name of the agent.
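And a matching sketch for CaptainAgent itself, relying on the defaults made explicit above (agent_lib and tool_lib omitted; config values are placeholders):

    from autogen import UserProxyAgent
    from autogen.agentchat.contrib.captainagent import CaptainAgent

    captain = CaptainAgent(
        name="captain",
        llm_config={"config_list": [{"model": "gpt-4o"}]},
        code_execution_config={"use_docker": False, "work_dir": "groupchat"},
        human_input_mode="NEVER",  # the default per the new signature
    )
    user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)
    user.initiate_chat(captain, message="Plan a small data-cleaning pipeline.")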
--- a/autogen/agentchat/contrib/captainagent/tool_retriever.py
+++ b/autogen/agentchat/contrib/captainagent/tool_retriever.py
@@ -14,7 +14,6 @@ import traceback
 from hashlib import md5
 from pathlib import Path
 from textwrap import dedent, indent
-from typing import Optional, Union

 from .... import AssistantAgent, UserProxyAgent
 from ....coding import CodeExecutor, CodeExtractor, LocalCommandLineCodeExecutor, MarkdownCodeExtractor
@@ -76,7 +75,7 @@ You have access to the following functions. You can write python code to call th
         agent.update_system_message(sys_message)
         return

-    def bind_user_proxy(self, agent: UserProxyAgent, tool_root:
+    def bind_user_proxy(self, agent: UserProxyAgent, tool_root: str | list):
         """Updates user proxy agent with a executor so that code executor can successfully execute function-related code.
         Returns an updated user proxy.
         """
@@ -164,7 +163,7 @@ class LocalExecutorWithTools(CodeExecutor):
         """(Experimental) Export a code extractor that can be used by an agent."""
         return MarkdownCodeExtractor()

-    def __init__(self, tools:
+    def __init__(self, tools: list[Tool] | None = None, work_dir: Path | str = Path()):
         self.tools = tools if tools is not None else []
         self.work_dir = work_dir
         if not os.path.exists(work_dir):
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -9,7 +9,7 @@ import json
 import logging
 import time
 from collections import defaultdict
-from typing import Any
+from typing import Any

 from ... import OpenAIWrapper
 from ...llm_config import LLMConfig
@@ -31,9 +31,9 @@ class GPTAssistantAgent(ConversableAgent):
     def __init__(
         self,
         name="GPT Assistant",
-        instructions:
-        llm_config:
-        assistant_config:
+        instructions: str | None = None,
+        llm_config: LLMConfig | dict[str, Any] | bool | None = None,
+        assistant_config: dict[str, Any] | None = None,
         overwrite_instructions: bool = False,
         overwrite_tools: bool = False,
         **kwargs: Any,
@@ -182,10 +182,10 @@ class GPTAssistantAgent(ConversableAgent):

     def _invoke_assistant(
         self,
-        messages:
-        sender:
-        config:
-    ) -> tuple[bool,
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Invokes the OpenAI assistant to generate a reply based on the given messages.

         Args:
@@ -392,7 +392,7 @@ class GPTAssistantAgent(ConversableAgent):
         # Clear the record of unread messages
         self._unread_index.clear()

-    def clear_history(self, agent:
+    def clear_history(self, agent: Agent | None = None):
         """Clear the chat history of the agent.

         Args:
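A construction sketch for the retyped GPTAssistantAgent signature; assistant_config follows the OpenAI Assistants API shape, and all values are placeholders:

    from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent

    assistant = GPTAssistantAgent(
        name="coder",
        instructions="You are a helpful Python coding assistant.",  # str | None
        llm_config={"config_list": [{"model": "gpt-4o"}]},
        assistant_config={"tools": [{"type": "code_interpreter"}]},  # dict[str, Any] | None
        overwrite_instructions=False,
    )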
--- a/autogen/agentchat/contrib/graph_rag/document.py
+++ b/autogen/agentchat/contrib/graph_rag/document.py
@@ -6,7 +6,7 @@
 # SPDX-License-Identifier: MIT
 from dataclasses import dataclass, field
 from enum import Enum, auto
-from typing import Any
+from typing import Any

 __all__ = ["Document", "DocumentType"]

@@ -25,5 +25,5 @@ class Document:
     """A wrapper of graph store query results."""

     doctype: DocumentType
-    data:
-    path_or_url:
+    data: Any | None = None
+    path_or_url: str | None = field(default_factory=lambda: "")
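A construction sketch for the retyped dataclass; the DocumentType members are not shown in this hunk, so TEXT is assumed here:

    from autogen.agentchat.contrib.graph_rag.document import Document, DocumentType

    # DocumentType.TEXT is a hypothetical member name for illustration.
    doc = Document(doctype=DocumentType.TEXT, path_or_url="./input/clean_energy.txt")
    assert doc.data is None  # the new explicit default above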
--- a/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py
@@ -28,8 +28,8 @@ class FalkorGraphQueryEngine:
         name: str,
         host: str = "127.0.0.1",
         port: int = 6379,
-        username:
-        password:
+        username: str | None = None,
+        password: str | None = None,
         model: Optional["GenerativeModel"] = None,
         ontology: Optional["Ontology"] = None,
     ):
@@ -57,7 +57,7 @@ class FalkorGraphQueryEngine:
         self.model = model or OpenAiGenerativeModel("gpt-4o")
         self.model_config = KnowledgeGraphModelConfig.with_model(model)
         self.ontology = ontology
-        self.knowledge_graph:
+        self.knowledge_graph: KnowledgeGraph | None = None  # type: ignore[no-any-unimported]
         self.falkordb = FalkorDB(host=self.host, port=self.port, username=self.username, password=self.password)

     def connect_db(self) -> None:
--- a/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py
+++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0

-from typing import Any
+from typing import Any

 from .... import Agent, ConversableAgent
 from .falkor_graph_query_engine import FalkorGraphQueryEngine
@@ -50,10 +50,10 @@ class FalkorGraphRagCapability(GraphRagCapability):
     def _reply_using_falkordb_query(
         self,
         recipient: ConversableAgent,
-        messages:
-        sender:
-        config:
-    ) -> tuple[bool,
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Query FalkorDB and return the message. Internally, it utilises OpenAI to generate a reply based on the given messages.
         The history with FalkorDB is also logged and updated.

@@ -76,7 +76,7 @@ class FalkorGraphRagCapability(GraphRagCapability):

         return True, result.answer if result.answer else "I'm sorry, I don't have an answer for that."

-    def _messages_summary(self, messages:
+    def _messages_summary(self, messages: dict[str, Any] | str, system_message: str) -> str:
         """Summarize the messages in the conversation history. Excluding any message with 'tool_calls' and 'tool_responses'
         Includes the 'name' (if it exists) and the 'content', with a new line between each one, like:
         customer:
--- a/autogen/agentchat/contrib/graph_rag/graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/graph_query_engine.py
@@ -5,7 +5,7 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 from dataclasses import dataclass, field
-from typing import Any,
+from typing import Any, Protocol, runtime_checkable

 from .document import Document

@@ -20,7 +20,7 @@ class GraphStoreQueryResult:
         results: intermediate results to question/query, e.g. node entities.
     """

-    answer:
+    answer: str | None = None
     results: list[Any] = field(default_factory=list)


@@ -31,7 +31,7 @@ class GraphQueryEngine(Protocol):
     This interface defines the basic methods for graph-based RAG.
     """

-    def init_db(self, input_doc:
+    def init_db(self, input_doc: list[Document] | None = None) -> None:
         """This method initializes graph database with the input documents or records.
         Usually, it takes the following steps,
         1. connecting to a graph database.
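The Protocol import above is what keeps GraphQueryEngine a structural interface: any engine exposing the right methods satisfies it without inheriting from it. A minimal stdlib-only sketch of the pattern:

    from typing import Any, Protocol, runtime_checkable

    @runtime_checkable
    class QueryEngine(Protocol):
        def init_db(self, input_doc: list[Any] | None = None) -> None: ...

    class InMemoryEngine:  # no inheritance required
        def init_db(self, input_doc: list[Any] | None = None) -> None:
            self.docs = input_doc or []

    assert isinstance(InMemoryEngine(), QueryEngine)  # structural match only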
--- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py
@@ -2,13 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 import os
-import sys
-from typing import Any, Optional, Union
-
-if sys.version_info >= (3, 10):
-    from typing import TypeAlias
-else:
-    from typing_extensions import TypeAlias
+from typing import Any, Optional, TypeAlias

 from ....import_utils import optional_import_block, require_optional_import
 from .document import Document, DocumentType
@@ -64,8 +58,8 @@ class Neo4jGraphQueryEngine:
         embedding: Optional["BaseEmbedding"] = None,
         entities: Optional["TypeAlias"] = None,
         relations: Optional["TypeAlias"] = None,
-        schema:
-        strict:
+        schema: dict[str, str] | list["Triple"] | None = None,
+        strict: bool | None = False,
     ):
         """Initialize a Neo4j Property graph.
         Please also refer to https://docs.llamaindex.ai/en/stable/examples/property_graph/graph_store/
@@ -96,7 +90,7 @@ class Neo4jGraphQueryEngine:
         self.schema = schema
         self.strict = strict

-    def init_db(self, input_doc:
+    def init_db(self, input_doc: list[Document] | None = None) -> None:
         """Build the knowledge graph with input documents."""
         self.documents = self._load_doc(input_doc if input_doc is not None else [])

@@ -245,7 +239,7 @@ class Neo4jGraphQueryEngine:
         # To add more extractors, please refer to https://docs.llamaindex.ai/en/latest/module_guides/indexing/lpg_index_guide/#construction
         """
         #
-        kg_extractors: list[
+        kg_extractors: list[TransformComponent] = [  # type: ignore[no-any-unimported]
             SchemaLLMPathExtractor(
                 llm=self.llm,
                 possible_entities=self.entities,
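The deleted version check in the first hunk of this file is the clearest signal in the diff that the code now assumes Python 3.10+, where TypeAlias lives in typing itself. For reference, both forms side by side (the alias name is illustrative):

    # New form, Python >= 3.10 only:
    from typing import TypeAlias

    LabelledTriple: TypeAlias = tuple[str, str, str]  # hypothetical alias

    # Removed form, which kept Python 3.9 working:
    # if sys.version_info >= (3, 10):
    #     from typing import TypeAlias
    # else:
    #     from typing_extensions import TypeAlias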
--- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
+++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0

-from typing import Any
+from typing import Any

 from .... import Agent, ConversableAgent, UserProxyAgent
 from .graph_query_engine import GraphStoreQueryResult
@@ -46,10 +46,10 @@ class Neo4jGraphCapability(GraphRagCapability):
     def _reply_using_neo4j_query(
         self,
         recipient: ConversableAgent,
-        messages:
-        sender:
-        config:
-    ) -> tuple[bool,
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Query neo4j and return the message. Internally, it queries the Property graph
         and returns the answer from the graph query engine.
         TODO: reply with a dictionary including both the answer and semantic source triplets.
@@ -74,7 +74,7 @@ class Neo4jGraphCapability(GraphRagCapability):

         return True, result.answer

-    def _get_last_question(self, message:
+    def _get_last_question(self, message: dict[str, Any] | str) -> str | dict[str, Any] | None:
         """Retrieves the last message from the conversation history."""
         if isinstance(message, str):
             return message