ag2 0.9.8.post1__py3-none-any.whl → 0.9.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ag2 might be problematic.
- {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
- {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
- autogen/_website/generate_mkdocs.py +3 -3
- autogen/_website/notebook_processor.py +1 -1
- autogen/_website/utils.py +1 -1
- autogen/agentchat/assistant_agent.py +15 -15
- autogen/agentchat/chat.py +52 -40
- autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
- autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
- autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
- autogen/agentchat/contrib/capabilities/transforms.py +1 -1
- autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
- autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
- autogen/agentchat/contrib/rag/query_engine.py +11 -11
- autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
- autogen/agentchat/contrib/swarm_agent.py +3 -2
- autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
- autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
- autogen/agentchat/contrib/web_surfer.py +1 -1
- autogen/agentchat/conversable_agent.py +184 -80
- autogen/agentchat/group/context_expression.py +21 -21
- autogen/agentchat/group/handoffs.py +11 -11
- autogen/agentchat/group/multi_agent_chat.py +3 -2
- autogen/agentchat/group/on_condition.py +11 -11
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +224 -0
- autogen/agentchat/group/safeguards/enforcer.py +1064 -0
- autogen/agentchat/group/safeguards/events.py +119 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/groupchat.py +60 -19
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
- autogen/agentchat/realtime/experimental/function_observer.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
- autogen/agentchat/user_proxy_agent.py +55 -53
- autogen/agents/experimental/document_agent/document_agent.py +1 -10
- autogen/agents/experimental/document_agent/parser_utils.py +5 -1
- autogen/browser_utils.py +4 -4
- autogen/cache/abstract_cache_base.py +2 -6
- autogen/cache/disk_cache.py +1 -6
- autogen/cache/in_memory_cache.py +2 -6
- autogen/cache/redis_cache.py +1 -5
- autogen/coding/__init__.py +10 -2
- autogen/coding/base.py +2 -1
- autogen/coding/docker_commandline_code_executor.py +1 -6
- autogen/coding/factory.py +9 -0
- autogen/coding/jupyter/docker_jupyter_server.py +1 -7
- autogen/coding/jupyter/jupyter_client.py +2 -9
- autogen/coding/jupyter/jupyter_code_executor.py +2 -7
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +0 -65
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/environments/docker_python_environment.py +3 -3
- autogen/environments/system_python_environment.py +5 -5
- autogen/environments/venv_python_environment.py +5 -5
- autogen/events/agent_events.py +1 -1
- autogen/events/client_events.py +1 -1
- autogen/fast_depends/utils.py +10 -0
- autogen/graph_utils.py +5 -7
- autogen/import_utils.py +28 -15
- autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
- autogen/io/processors/console_event_processor.py +8 -3
- autogen/llm_config/config.py +168 -91
- autogen/llm_config/entry.py +38 -26
- autogen/llm_config/types.py +35 -0
- autogen/llm_config/utils.py +223 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
- autogen/messages/agent_messages.py +1 -1
- autogen/messages/client_messages.py +1 -1
- autogen/oai/__init__.py +8 -1
- autogen/oai/client.py +10 -3
- autogen/oai/client_utils.py +1 -1
- autogen/oai/cohere.py +4 -4
- autogen/oai/gemini.py +4 -6
- autogen/oai/gemini_types.py +1 -0
- autogen/oai/openai_utils.py +44 -115
- autogen/tools/dependency_injection.py +4 -8
- autogen/tools/experimental/reliable/reliable.py +3 -2
- autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
- autogen/tools/function_utils.py +2 -1
- autogen/version.py +1 -1
- {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
- {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.8.post1.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
autogen/agentchat/chat.py
CHANGED
@@ -7,11 +7,12 @@
 import asyncio
 import datetime
 import logging
+import uuid
 import warnings
 from collections import defaultdict
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from functools import partial
-from typing import Any
+from typing import Any, TypedDict
 
 from ..doc_utils import export_module
 from ..events.agent_events import PostCarryoverProcessingEvent
@@ -24,26 +25,38 @@ Prerequisite = tuple[int, int]
 __all__ = ["ChatResult", "a_initiate_chats", "initiate_chats"]
 
 
+class CostDict(TypedDict):
+    usage_including_cached_inference: dict[str, Any]
+    usage_excluding_cached_inference: dict[str, Any]
+
+
 @dataclass
 @export_module("autogen")
 class ChatResult:
     """(Experimental) The result of a chat. Almost certain to be changed."""
 
-    chat_id: int =
+    chat_id: int = field(default_factory=lambda: uuid.uuid4().int)
     """chat id"""
-
+
+    chat_history: list[dict[str, Any]] = field(default_factory=list)
     """The chat history."""
-
+
+    summary: str = ""
     """A summary obtained from the chat."""
-
-
+
+    cost: CostDict = field(
+        default_factory=lambda: {
+            "usage_including_cached_inference": {},
+            "usage_excluding_cached_inference": {},
+        }
     )
     """The cost of the chat.
     The value for each usage type is a dictionary containing cost information for that specific type.
     - "usage_including_cached_inference": Cost information on the total usage, including the tokens in cached inference.
     - "usage_excluding_cached_inference": Cost information on the usage of tokens, excluding the tokens in cache. No larger than "usage_including_cached_inference".
     """
-
+
+    human_input: list[str] = field(default_factory=list)
     """A list of human input solicited during the chat."""
 
 
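A note on the `field(default_factory=...)` pattern this hunk adopts: dataclasses reject mutable defaults like `[]` or `{}` at class-definition time, and `default_factory` builds a fresh value per instance, which also covers computed defaults such as the uuid-based `chat_id`. A minimal standalone sketch (not ag2 code):

```python
import uuid
from dataclasses import dataclass, field


@dataclass
class Result:
    # `history: list = []` would raise:
    # ValueError: mutable default <class 'list'> for field history is not allowed
    history: list = field(default_factory=list)
    # default_factory also supports computed defaults: each instance
    # gets its own independently generated integer id.
    chat_id: int = field(default_factory=lambda: uuid.uuid4().int)


a, b = Result(), Result()
assert a.history is not b.history  # separate lists per instance
assert a.chat_id != b.chat_id      # ids generated per instance
```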
@@ -141,37 +154,36 @@ def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]:
     """Initiate a list of chats.
 
     Args:
-        chat_queue (List[Dict]): A list of dictionaries containing the information about the chats
-
+        chat_queue (List[Dict]): A list of dictionaries containing the information about the chats.\n
             Each dictionary should contain the input arguments for
-            [`ConversableAgent.initiate_chat`](../ConversableAgent#initiate-chat)
-            For example
-            - `"sender"` - the sender agent
-            - `"recipient"` - the recipient agent
-            - `"clear_history"` (bool) - whether to clear the chat history with the agent
-
-            - `"silent"` (bool or None) - (Experimental) whether to print the messages in this
-
-            - `"cache"` (Cache or None) - the cache client to use for this conversation
-
-            - `"max_turns"` (int or None) - maximum number of turns for the chat. If None, the chat
-
-            - `"summary_method"` (str or callable) - a string or callable specifying the method to get
-
-            - `"summary_args"` (dict) - a dictionary of arguments to be passed to the summary_method
-
-            - `"message"` (str, callable or None) - if None, input() will be called to get the
-
-            - `**context` - additional context information to be passed to the chat
-            - `"carryover"` - It can be used to specify the carryover information to be passed
-
-
-            - `"finished_chat_indexes_to_exclude_from_carryover"` - It can be used by specifying a list of indexes of the finished_chats list
-
-
+            [`ConversableAgent.initiate_chat`](../ConversableAgent#initiate-chat).\n
+            For example:\n
+            - `"sender"` - the sender agent.\n
+            - `"recipient"` - the recipient agent.\n
+            - `"clear_history"` (bool) - whether to clear the chat history with the agent.\n
+                Default is True.\n
+            - `"silent"` (bool or None) - (Experimental) whether to print the messages in this\n
+                conversation. Default is False.\n
+            - `"cache"` (Cache or None) - the cache client to use for this conversation.\n
+                Default is None.\n
+            - `"max_turns"` (int or None) - maximum number of turns for the chat. If None, the chat\n
+                will continue until a termination condition is met. Default is None.\n
+            - `"summary_method"` (str or callable) - a string or callable specifying the method to get\n
+                a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".\n
+            - `"summary_args"` (dict) - a dictionary of arguments to be passed to the summary_method.\n
+                Default is {}.\n
+            - `"message"` (str, callable or None) - if None, input() will be called to get the\n
+                initial message.\n
+            - `**context` - additional context information to be passed to the chat.\n
+            - `"carryover"` - It can be used to specify the carryover information to be passed\n
+                to this chat. If provided, we will combine this carryover with the "message" content when\n
+                generating the initial chat message in `generate_init_message`.\n
+            - `"finished_chat_indexes_to_exclude_from_carryover"` - It can be used by specifying a list of indexes of the finished_chats list,\n
+                from which to exclude the summaries for carryover. If 'finished_chat_indexes_to_exclude_from_carryover' is not provided or an empty list,\n
+                then summary from all the finished chats will be taken.\n
 
     Returns:
-        (list): a list of ChatResult objects corresponding to the finished chats in the chat_queue
+        (list): a list of ChatResult objects corresponding to the finished chats in the chat_queue.\n
     """
     consolidate_chat_info(chat_queue)
     _validate_recipients(chat_queue)
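Going by the keys documented in this docstring, a `chat_queue` entry could look like the hypothetical sketch below; `user` and `assistant` stand in for already-constructed ConversableAgent instances, and everything else uses only the documented argument names:

```python
# Hypothetical chat_queue for initiate_chats(), built only from the keys
# documented above. `user` and `assistant` are assumed to be existing
# ConversableAgent instances.
chat_queue = [
    {
        "sender": user,
        "recipient": assistant,
        "message": "Summarize the quarterly report.",
        "max_turns": 2,
        "summary_method": "last_msg",
    },
    {
        "sender": user,
        "recipient": assistant,
        "message": "Draft an email based on the summary.",
        "carryover": "Audience: executive team.",
    },
]

# results = initiate_chats(chat_queue)  # returns one ChatResult per finished chat
```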
@@ -220,7 +232,7 @@ async def _dependent_chat_future(
     finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
         "finished_chat_indexes_to_exclude_from_carryover", []
     )
-    finished_chats =
+    finished_chats = {}
     for chat in prerequisite_chat_futures:
         chat_future = prerequisite_chat_futures[chat]
         if chat_future.cancelled():
@@ -291,18 +303,18 @@ async def a_initiate_chats(chat_queue: list[dict[str, Any]]) -> dict[int, ChatResult]:
     num_chats = chat_book.keys()
     prerequisites = __create_async_prerequisites(chat_queue)
     chat_order_by_id = __find_async_chat_order(num_chats, prerequisites)
-    finished_chat_futures =
+    finished_chat_futures = {}
     for chat_id in chat_order_by_id:
         chat_info = chat_book[chat_id]
         prerequisite_chat_ids = chat_info.get("prerequisites", [])
-        pre_chat_futures =
+        pre_chat_futures = {}
         for pre_chat_id in prerequisite_chat_ids:
             pre_chat_future = finished_chat_futures[pre_chat_id]
             pre_chat_futures[pre_chat_id] = pre_chat_future
         current_chat_future = await _dependent_chat_future(chat_id, chat_info, pre_chat_futures)
         finished_chat_futures[chat_id] = current_chat_future
     await asyncio.gather(*list(finished_chat_futures.values()))
-    finished_chats =
+    finished_chats = {}
     for chat in finished_chat_futures:
         chat_result = finished_chat_futures[chat].result()
         finished_chats[chat] = chat_result
autogen/agentchat/contrib/capabilities/text_compressors.py
CHANGED
@@ -34,11 +34,11 @@ class LLMLingua:
 
     def __init__(
         self,
-        prompt_compressor_kwargs: dict =
-            model_name
-            use_llmlingua2
-            device_map
-
+        prompt_compressor_kwargs: dict = {
+            "model_name": "microsoft/llmlingua-2-bert-base-multilingual-cased-meetingbank",
+            "use_llmlingua2": True,
+            "device_map": "cpu",
+        },
         structured_compression: bool = False,
     ) -> None:
         """Args:
@@ -337,7 +337,7 @@ class TextMessageCompressor:
         self,
         text_compressor: TextCompressor | None = None,
         min_tokens: int | None = None,
-        compression_params: dict =
+        compression_params: dict = {},
         cache: AbstractCache | None = None,
         filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
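Both reconstructed signatures above take a literal dict as a parameter default. One thing to keep in mind when reading them: Python evaluates a default once at function definition, so a mutable default object is shared across calls, and any mutation leaks between them. The usual alternative is a `None` sentinel; a standalone illustration:

```python
def bad(params: dict = {}):
    # the same dict object is reused on every call without an argument
    params["hits"] = params.get("hits", 0) + 1
    return params


def good(params: dict | None = None):
    # the None sentinel gives each call its own fresh dict
    params = {} if params is None else params
    params["hits"] = params.get("hits", 0) + 1
    return params


assert bad() == {"hits": 1}
assert bad() == {"hits": 2}   # state leaked from the first call
assert good() == {"hits": 1}
assert good() == {"hits": 1}  # independent calls
```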
autogen/agentchat/contrib/captainagent/agent_builder.py
CHANGED
@@ -380,7 +380,7 @@ Match roles in the role set to each expert in expert set.
 
     def clear_all_agents(self, recycle_endpoint: bool | None = True):
         """Clear all cached agents."""
-        for agent_name in
+        for agent_name in list(self.agent_procs_assign):
            self.clear_agent(agent_name, recycle_endpoint)
        print(colored("All agents have been cleared.", "yellow"), flush=True)
 
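The `list(...)` wrapper in this one-line fix matters because `clear_agent` presumably removes entries from `self.agent_procs_assign`: deleting keys from a dict while iterating it directly raises a `RuntimeError`. A minimal reproduction of the pattern:

```python
procs = {"agent_a": 1, "agent_b": 2}

# Iterating the dict directly while deleting raises
# "RuntimeError: dictionary changed size during iteration":
#   for name in procs:
#       del procs[name]

# Iterating a snapshot of the keys is safe:
for name in list(procs):
    del procs[name]

assert procs == {}
```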
autogen/agentchat/contrib/captainagent/captainagent.py
CHANGED
@@ -149,25 +149,26 @@ Note that the previous experts will forget everything after you obtain the response
         description: str | None = DEFAULT_DESCRIPTION,
         **kwargs: Any,
     ):
-        """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        """
+        Args:\n
+            name (str): agent name.\n
+            system_message (str): system message for the ChatCompletion inference.\n
+                Please override this attribute if you want to reprogram the agent.\n
+            llm_config (LLMConfig or dict or False): llm inference configuration.\n
+                Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create) for available options.\n
+            is_termination_msg (function): a function that takes a message in the form of a dictionary\n
+                and returns a boolean value indicating if this received message is a termination message.\n
+                The dict can contain the following keys: "content", "role", "name", "function_call".\n
+            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.\n
+                default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).\n
+                The limit only plays a role when human_input_mode is not "ALWAYS".\n
+            agent_lib (str): the path or a JSON file of the agent library for retrieving the nested chat instantiated by CaptainAgent.\n
+            tool_lib (str): the path to the tool library for retrieving the tools used in the nested chat instantiated by CaptainAgent.\n
+            nested_config (dict): the configuration for the nested chat instantiated by CaptainAgent.\n
+                A full list of keys and their functionalities can be found in [docs](https://docs.ag2.ai/latest/docs/user-guide/reference-agents/captainagent).\n
+            agent_config_save_path (str): the path to save the generated or retrieved agent configuration.\n
+            **kwargs (dict): Please refer to other kwargs in\n
+                [ConversableAgent](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/conversable_agent.py#L74).\n
         """
         super().__init__(
             name,
autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py
CHANGED
@@ -55,7 +55,7 @@ class FalkorGraphQueryEngine:
         self.username = username
         self.password = password
         self.model = model or OpenAiGenerativeModel("gpt-4o")
-        self.model_config = KnowledgeGraphModelConfig.with_model(model)
+        self.model_config = KnowledgeGraphModelConfig.with_model(self.model)
         self.ontology = ontology
         self.knowledge_graph: KnowledgeGraph | None = None  # type: ignore[no-any-unimported]
         self.falkordb = FalkorDB(host=self.host, port=self.port, username=self.username, password=self.password)
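The one-character fix above is easy to miss: `model` may be `None` (that is what the `or OpenAiGenerativeModel("gpt-4o")` fallback is for), so passing the raw argument forwards `None` instead of the defaulted value. The shape of the bug, in a standalone sketch:

```python
class Engine:
    def __init__(self, model=None):
        self.model = model or "default-model"
        # Bug: uses the raw argument, which may be None.
        self.config_buggy = {"model": model}
        # Fix: use the attribute that already applied the fallback.
        self.config_fixed = {"model": self.model}


e = Engine()
assert e.config_buggy == {"model": None}            # silently wrong
assert e.config_fixed == {"model": "default-model"}
```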
@@ -139,9 +139,6 @@ class FalkorGraphQueryEngine:
 
         response = self._chat_session.send_message(question)
 
-        # History will be considered when querying by setting the last_answer
-        self._chat_session.last_answer = response["response"]
-
         return GraphStoreQueryResult(answer=response["response"], results=[])
 
     def delete(self) -> bool:
@@ -167,4 +164,4 @@ class FalkorGraphQueryEngine:
         if self.ontology_table_name not in self.falkordb.list_graphs():
             raise ValueError(f"Knowledge graph {self.name} has not been created.")
         graph = self.__get_ontology_storage_graph()
-        return Ontology.
+        return Ontology.from_schema_graph(graph)
autogen/agentchat/contrib/graph_rag/graph_rag_capability.py
CHANGED
@@ -14,10 +14,10 @@ __all__ = ["GraphRagCapability"]
 class GraphRagCapability(AgentCapability):
     """A graph-based RAG capability uses a graph query engine to give a conversable agent the graph-based RAG ability.
 
-    An agent class with graph-based RAG capability could
-    1. create a graph in the underlying database with input documents
-    2. retrieved relevant information based on messages received by the agent
-    3. generate answers from retrieved information and send messages back
+    An agent class with graph-based RAG capability could:\n
+    1. create a graph in the underlying database with input documents.\n
+    2. retrieved relevant information based on messages received by the agent.\n
+    3. generate answers from retrieved information and send messages back.\n
 
     For example,
     ```python
@@ -41,7 +41,7 @@ class GraphRagCapability(AgentCapability):
     user_proxy.initiate_chat(graph_rag_agent, message="Name a few actors who've played in 'The Matrix'")
 
     # ChatResult(
-    #     chat_id=
+    #     chat_id=uuid.uuid4().int,
     #     chat_history=[
    #         {'content': 'Name a few actors who've played in \'The Matrix\'', 'role': 'graph_rag_agent'},
    #         {'content': 'A few actors who have played in The Matrix are:
autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py
CHANGED
@@ -28,23 +28,24 @@ with optional_import_block():
 
 @require_optional_import("llama_index", "neo4j")
 class Neo4jGraphQueryEngine:
-    """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    """
+    This class serves as a wrapper for a property graph query engine backed by LlamaIndex and Neo4j,\n
+    facilitating the creating, connecting, updating, and querying of LlamaIndex property graphs.\n
+    \n
+    It builds a property graph Index from input documents,\n
+    storing and retrieving data from the property graph in the Neo4j database.\n
+    \n
+    It extracts triplets, i.e., [entity] -> [relationship] -> [entity] sets,\n
+    from the input documents using llamIndex extractors.\n
+    \n
+    Users can provide custom entities, relationships, and schema to guide the extraction process.\n
+    \n
+    If strict is True, the engine will extract triplets following the schema\n
+    of allowed relationships for each entity specified in the schema.\n
+    \n
+    It also leverages LlamaIndex's chat engine which has a conversation history internally to provide context-aware responses.\n
+    \n
+    For usage, please refer to example notebook/agentchat_graph_rag_neo4j.ipynb\n
     """
 
     def __init__(  # type: ignore[no-any-unimported]
autogen/agentchat/contrib/rag/mongodb_query_engine.py
CHANGED
@@ -17,7 +17,7 @@ with optional_import_block():
     from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
     from llama_index.core.embeddings import BaseEmbedding
     from llama_index.core.schema import Document as LlamaDocument
-    from llama_index.llms.langchain.base import LLM
+    from llama_index.llms.langchain.base import LLM  # type: ignore[attr-defined]
     from llama_index.llms.openai import OpenAI
     from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
     from pymongo import MongoClient
@@ -114,7 +114,7 @@ class MongoDBQueryEngine:
         logger.info("Vector database created.")
         self.vector_search_engine = MongoDBAtlasVectorSearch(
             mongodb_client=self.vector_db.client,  # type: ignore[union-attr]
-            db_name=self.database_name,
+            db_name=self.database_name,  # type: ignore[arg-type]
             collection_name=self.collection_name,
         )
         logger.info("Vector search engine created.")
autogen/agentchat/contrib/rag/query_engine.py
CHANGED
@@ -29,18 +29,18 @@ class RAGQueryEngine(Protocol):
         """Initialize the database with the input documents or records.
 
         This method initializes database with the input documents or records.
-        Usually, it takes the following steps
-        1. connecting to a database
-        2. insert records
-        3. build indexes etc
+        Usually, it takes the following steps:\n
+        1. connecting to a database.\n
+        2. insert records.\n
+        3. build indexes etc.\n
 
-        Args
-            new_doc_dir (Optional[Union[Path, str]]): A directory containing documents to be ingested
-            new_doc_paths_or_urls (Optional[Sequence[Union[Path, str]]]): A list of paths or URLs to documents to be ingested
-            *args: Any additional arguments
-            **kwargs: Any additional keyword arguments
-        Returns
-            bool: True if initialization is successful, False otherwise
+        Args:\n
+            new_doc_dir (Optional[Union[Path, str]]): A directory containing documents to be ingested.\n
+            new_doc_paths_or_urls (Optional[Sequence[Union[Path, str]]]): A list of paths or URLs to documents to be ingested.\n
+            *args: Any additional arguments\n
+            **kwargs: Any additional keyword arguments\n
+        Returns:\n
+            bool: True if initialization is successful, False otherwise\n
         """
         ...
 
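Since `RAGQueryEngine` is a `Protocol`, any class whose methods structurally match satisfies it without inheriting from it. The hypothetical stub below sketches only the `init_db` method described in this hunk; a real implementation would also need the protocol's other methods, which this diff does not show:

```python
from pathlib import Path
from typing import Any, Optional, Sequence, Union


class InMemoryQueryEngine:
    """Hypothetical stand-in for the init_db portion of RAGQueryEngine."""

    def __init__(self) -> None:
        self.docs: list[str] = []

    def init_db(
        self,
        new_doc_dir: Optional[Union[Path, str]] = None,
        new_doc_paths_or_urls: Optional[Sequence[Union[Path, str]]] = None,
        *args: Any,
        **kwargs: Any,
    ) -> bool:
        # 1. "connect" (nothing to do in memory), 2. insert records;
        # 3. index building is skipped in this toy example.
        for p in new_doc_paths_or_urls or []:
            self.docs.append(Path(p).read_text())
        return True
```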
autogen/agentchat/contrib/retrieve_assistant_agent.py
CHANGED
@@ -7,10 +7,13 @@
 import warnings
 from typing import Any
 
+from typing_extensions import deprecated
+
 from ..agent import Agent
 from ..assistant_agent import AssistantAgent
 
 
+@deprecated("The RetrieveAssistantAgent is deprecated. Please use the AssistantAgent instead.")
 class RetrieveAssistantAgent(AssistantAgent):
     """(Experimental) Retrieve Assistant agent, designed to solve a task with LLM.
 
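For context on the decorator being added here: `typing_extensions.deprecated` (PEP 702) both marks the class for static type checkers and, when applied to a class, emits a `DeprecationWarning` at instantiation time. A small demonstration:

```python
import warnings

from typing_extensions import deprecated


@deprecated("Use NewThing instead.")
class OldThing:
    pass


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldThing()  # instantiating the deprecated class triggers the warning

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```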
autogen/agentchat/contrib/swarm_agent.py
CHANGED
@@ -1163,8 +1163,9 @@ async def a_run_swarm(
         except Exception as e:
             response.iostream.send(ErrorEvent(error=e))  # type: ignore[call-arg]
 
-    asyncio.create_task(stream_run())
-
+    task = asyncio.create_task(stream_run())
+    # prevent the task from being garbage collected
+    response._task_ref = task  # type: ignore[attr-defined]
     return response
 
 
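The added reference addresses a documented asyncio pitfall: the event loop holds only weak references to tasks, so a task created with `asyncio.create_task` and stored nowhere can be garbage collected before it finishes. Stashing it on `response` is one way to keep it alive; the pattern the asyncio docs suggest keeps tasks in a set, sketched below:

```python
import asyncio

background_tasks: set[asyncio.Task] = set()


async def stream_run() -> None:
    await asyncio.sleep(0.1)


async def main() -> None:
    task = asyncio.create_task(stream_run())
    # Hold a strong reference so the task is not garbage collected,
    # and drop it automatically once the task finishes.
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)
    await asyncio.sleep(0.2)


asyncio.run(main())
```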
autogen/agentchat/contrib/vectordb/couchbase.py
CHANGED
@@ -279,7 +279,7 @@ class CouchbaseVectorDB(VectorDB):
 
         for i in range(0, len(docs), batch_size):
             batch = docs[i : i + batch_size]
-            docs_to_upsert =
+            docs_to_upsert = {}
             for doc in batch:
                 doc_id = doc["id"]
                 embedding = self.embedding_function([
autogen/agentchat/contrib/vectordb/mongodb.py
CHANGED
@@ -437,7 +437,7 @@ class MongoDBAtlasVectorDB(VectorDB):
         if include is None:
             include_fields = {"_id": 1, "content": 1, "metadata": 1}
         else:
-            include_fields =
+            include_fields = dict.fromkeys(set(include).union({"_id"}), 1)
         collection = self.get_collection(collection_name)
         if ids is not None:
             docs = collection.find({"_id": {"$in": ids}}, include_fields)
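The reconstructed right-hand side builds a MongoDB projection document: `dict.fromkeys(iterable, 1)` maps every requested field to `1` (include), and the union with `{"_id"}` ensures the id field is always projected:

```python
include = ["content", "metadata"]
include_fields = dict.fromkeys(set(include).union({"_id"}), 1)
assert include_fields == {"_id": 1, "content": 1, "metadata": 1}
```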
autogen/agentchat/contrib/web_surfer.py
CHANGED
@@ -277,7 +277,7 @@ class WebSurferAgent(ConversableAgent):
         self._assistant.reset()  # type: ignore[no-untyped-call]
 
         # Clone the messages to give context
-        self._assistant.chat_messages[self._user_proxy] =
+        self._assistant.chat_messages[self._user_proxy] = []
         history = messages[0 : len(messages) - 1]
         for message in history:
             self._assistant.chat_messages[self._user_proxy].append(message)