lionagi 0.0.206__py3-none-any.whl → 0.0.208__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- lionagi/_services/ollama.py +2 -2
- lionagi/core/branch/branch.py +517 -265
- lionagi/core/branch/branch_manager.py +0 -1
- lionagi/core/branch/conversation.py +640 -337
- lionagi/core/core_util.py +0 -59
- lionagi/core/sessions/session.py +137 -64
- lionagi/tools/tool_manager.py +39 -62
- lionagi/utils/__init__.py +3 -2
- lionagi/utils/call_util.py +9 -7
- lionagi/utils/sys_util.py +287 -255
- lionagi/version.py +1 -1
- {lionagi-0.0.206.dist-info → lionagi-0.0.208.dist-info}/METADATA +1 -1
- {lionagi-0.0.206.dist-info → lionagi-0.0.208.dist-info}/RECORD +16 -17
- lionagi/utils/pd_util.py +0 -57
- {lionagi-0.0.206.dist-info → lionagi-0.0.208.dist-info}/LICENSE +0 -0
- {lionagi-0.0.206.dist-info → lionagi-0.0.208.dist-info}/WHEEL +0 -0
- {lionagi-0.0.206.dist-info → lionagi-0.0.208.dist-info}/top_level.txt +0 -0
lionagi/core/branch/branch.py
CHANGED
@@ -1,17 +1,17 @@
|
|
1
1
|
import json
|
2
|
-
|
3
2
|
import pandas as pd
|
4
3
|
|
5
4
|
from typing import Any, Callable, Dict, List, Optional, Union
|
6
5
|
from collections import deque
|
7
|
-
import asyncio
|
8
6
|
from dotenv import load_dotenv
|
9
7
|
|
10
|
-
from lionagi.utils import as_dict, get_flattened_keys, alcall, lcall,
|
8
|
+
from lionagi.utils import as_dict, get_flattened_keys, alcall, lcall, to_list
|
9
|
+
from lionagi.utils.sys_util import is_same_dtype
|
11
10
|
from lionagi.schema import Tool
|
12
11
|
from lionagi._services.base_service import StatusTracker, BaseService
|
13
12
|
from lionagi._services.oai import OpenAIService
|
14
13
|
from lionagi._services.openrouter import OpenRouterService
|
14
|
+
|
15
15
|
from lionagi.configs.oai_configs import oai_schema
|
16
16
|
from lionagi.configs.openrouter_configs import openrouter_schema
|
17
17
|
from lionagi.tools.tool_manager import ToolManager
|
@@ -19,63 +19,91 @@ from lionagi.tools.tool_manager import ToolManager
|
|
19
19
|
from ..messages.messages import Instruction, System
|
20
20
|
from ..instruction_set.instruction_set import InstructionSet
|
21
21
|
|
22
|
-
from .conversation import Conversation
|
22
|
+
from .conversation import Conversation, validate_messages
|
23
23
|
from .branch_manager import Request
|
24
|
-
from ..core_util import validate_messages
|
25
24
|
|
26
25
|
load_dotenv()
|
27
26
|
|
28
|
-
oai_service = OpenAIService()
|
29
27
|
|
30
28
|
class Branch(Conversation):
|
31
29
|
"""
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
30
|
+
Manages a conversation branch within the application, handling messages, instruction sets,
|
31
|
+
tool registrations, and service interactions for a single conversation flow. Extends the
|
32
|
+
Conversation class to provide specialized functionalities like message handling, tool
|
33
|
+
management, and integration with external services.
|
36
34
|
|
37
35
|
Attributes:
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
-
|
36
|
+
messages (pd.DataFrame): Dataframe storing conversation messages.
|
37
|
+
instruction_sets (Dict[str, InstructionSet]): Dictionary mapping instruction set names to their instances.
|
38
|
+
tool_manager (ToolManager): Manages tools available within the conversation.
|
39
|
+
status_tracker (StatusTracker): Tracks the status of various tasks within the conversation.
|
40
|
+
name (Optional[str]): Identifier for the branch.
|
41
|
+
pending_ins (Dict): Dictionary storing incoming requests.
|
42
|
+
pending_outs (deque): Queue for outgoing requests.
|
43
|
+
service (BaseService): Service instance for interaction with external services.
|
44
|
+
llmconfig (Dict): Configuration for language model interactions.
|
45
|
+
|
46
|
+
Methods:
|
47
|
+
__init__(self, name=None, messages=None, instruction_sets=None, tool_manager=None,
|
48
|
+
service=None, llmconfig=None):
|
49
|
+
Initializes a new Branch instance with optional configurations.
|
50
|
+
|
51
|
+
clone(self) -> 'Branch':
|
52
|
+
Creates a deep copy of the current Branch instance.
|
53
|
+
|
54
|
+
merge_branch(self, branch: 'Branch', update: True):
|
55
|
+
Merges another branch into the current Branch instance.
|
56
|
+
|
57
|
+
send(self, to_name: str, title: str, package: Any):
|
58
|
+
Sends a request package to a specified recipient.
|
59
|
+
|
60
|
+
receive(self, from_name: str, messages=True, tool=True, service=True, llmconfig=True):
|
61
|
+
Processes and integrates received request packages based on their titles.
|
62
|
+
|
63
|
+
receive_all(self):
|
64
|
+
Processes all pending incoming requests from all senders.
|
65
|
+
|
66
|
+
call_chatcompletion(self, sender=None, with_sender=False, **kwargs):
|
67
|
+
Asynchronously calls the chat completion service with the current message queue.
|
68
|
+
|
69
|
+
chat(self, instruction: Union[Instruction, str], context=None, sender=None, system=None,
|
70
|
+
tools=False, out=True, invoke=True, **kwargs) -> Any:
|
71
|
+
Asynchronously handles a chat interaction within the branch.
|
72
|
+
|
73
|
+
ReAct(self, instruction: Union[Instruction, str], context=None, sender=None, system=None,
|
74
|
+
tools=None, num_rounds=1, **kwargs):
|
75
|
+
Performs a sequence of reasoning and action based on the given instruction over multiple rounds.
|
76
|
+
|
77
|
+
auto_followup(self, instruction: Union[Instruction, str], context=None, sender=None,
|
78
|
+
system=None, tools=False, max_followup=3, out=True, **kwargs) -> None:
|
79
|
+
Automatically performs follow-up actions until a specified condition is met or the maximum number of follow-ups is reached.
|
80
|
+
|
81
|
+
Note:
|
82
|
+
This class is designed to be used within an asynchronous environment, where methods like
|
83
|
+
`chat`, `ReAct`, and `auto_followup` are particularly useful for handling complex conversation flows.
|
55
84
|
"""
|
56
|
-
|
85
|
+
|
57
86
|
def __init__(
|
58
87
|
self,
|
59
88
|
name: Optional[str] = None,
|
60
|
-
dir: Optional[str] = None,
|
61
89
|
messages: Optional[pd.DataFrame] = None,
|
62
90
|
instruction_sets: Optional[Dict[str, InstructionSet]] = None,
|
63
91
|
tool_manager: Optional[ToolManager] = None,
|
64
|
-
service:
|
92
|
+
service : Optional[BaseService] = None,
|
65
93
|
llmconfig: Optional[Dict] = None,
|
66
94
|
):
|
67
95
|
"""
|
68
96
|
Initializes a new Branch instance.
|
69
97
|
|
70
98
|
Args:
|
71
|
-
|
72
|
-
messages (Optional[pd.DataFrame]): A DataFrame containing conversation messages.
|
73
|
-
instruction_sets (Optional[Dict[str, InstructionSet]]):
|
74
|
-
tool_manager (Optional[ToolManager]):
|
75
|
-
service (
|
76
|
-
llmconfig (Optional[Dict]): Configuration for
|
77
|
-
"""
|
78
|
-
super().__init__(
|
99
|
+
name (Optional[str]): Name of the branch, providing an identifier within the conversational system. Defaults to None.
|
100
|
+
messages (Optional[pd.DataFrame]): A pandas DataFrame containing the conversation's messages. Initializes with an empty DataFrame if None. Defaults to None.
|
101
|
+
instruction_sets (Optional[Dict[str, InstructionSet]]): Dictionary mapping instruction set names to InstructionSet objects for conversation flow management. Defaults to {}.
|
102
|
+
tool_manager (Optional[ToolManager]): Manages tools within the branch. Creates a new instance if None. Defaults to None.
|
103
|
+
service (Optional[BaseService]): Interacts with external services. Initializes a default service based on branch configuration if None. Defaults to None.
|
104
|
+
llmconfig (Optional[Dict]): Configuration for language model interactions. Sets up default configuration based on the service type if None. Defaults to None.
|
105
|
+
"""
|
106
|
+
super().__init__()
|
79
107
|
self.messages = (
|
80
108
|
messages
|
81
109
|
if messages is not None
|
@@ -85,101 +113,108 @@ class Branch(Conversation):
|
|
85
113
|
)
|
86
114
|
self.instruction_sets = instruction_sets if instruction_sets else {}
|
87
115
|
self.tool_manager = tool_manager if tool_manager else ToolManager()
|
88
|
-
|
89
|
-
self.service = service if service else oai_service
|
90
116
|
self.status_tracker = StatusTracker()
|
91
|
-
|
92
|
-
self.llmconfig = llmconfig
|
93
|
-
else:
|
94
|
-
if isinstance(service, OpenAIService):
|
95
|
-
self.llmconfig = oai_schema["chat/completions"]["config"]
|
96
|
-
elif isinstance(service, OpenRouterService):
|
97
|
-
self.llmconfig = openrouter_schema["chat/completions"]["config"]
|
98
|
-
else:
|
99
|
-
self.llmconfig = {}
|
100
|
-
|
117
|
+
self._add_service(service, llmconfig)
|
101
118
|
self.name = name
|
102
119
|
self.pending_ins = {}
|
103
120
|
self.pending_outs = deque()
|
104
121
|
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
"""
|
109
|
-
Change the system message of the conversation.
|
122
|
+
@property
|
123
|
+
def chat_messages(self):
|
124
|
+
return self._to_chatcompletion_message()
|
110
125
|
|
111
|
-
|
112
|
-
|
113
|
-
|
126
|
+
@property
|
127
|
+
def chat_messages_with_sender(self):
|
128
|
+
return self._to_chatcompletion_message(with_sender=True)
|
114
129
|
|
115
|
-
|
116
|
-
|
130
|
+
@property
|
131
|
+
def messages_describe(self) -> Dict[str, Any]:
|
132
|
+
return {
|
133
|
+
"total_messages": len(self.messages),
|
134
|
+
"summary_by_role": self._info(),
|
135
|
+
"summary_by_sender": self._info(use_sender=True),
|
136
|
+
"instruction_sets": self.instruction_sets,
|
137
|
+
"registered_tools": self.tool_manager.registry,
|
138
|
+
"messages": [
|
139
|
+
msg.to_dict() for _, msg in self.messages.iterrows()
|
140
|
+
],
|
141
|
+
}
|
117
142
|
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
"""
|
122
|
-
if len(self.messages[self.messages.role == 'system']) == 0:
|
123
|
-
raise ValueError("There is no system message in the messages.")
|
124
|
-
if isinstance(system, (str, Dict)):
|
125
|
-
system = System(system, sender=sender)
|
126
|
-
if isinstance(system, System):
|
127
|
-
message_dict = system.to_dict()
|
128
|
-
if sender:
|
129
|
-
message_dict['sender'] = sender
|
130
|
-
message_dict['timestamp'] = str(pd.Timestamp.now())
|
131
|
-
sys_index = self.messages[self.messages.role == 'system'].index
|
132
|
-
self.messages.loc[sys_index[0]] = message_dict
|
143
|
+
@property
|
144
|
+
def has_tools(self) -> bool:
|
145
|
+
return self.tool_manager.registry != {}
|
133
146
|
|
134
|
-
|
135
|
-
raise ValueError("Input cannot be converted into a system message.")
|
147
|
+
# ----- tool manager methods ----- #
|
136
148
|
|
137
149
|
def register_tools(self, tools: Union[Tool, List[Tool]]):
|
138
150
|
"""
|
139
|
-
|
151
|
+
Registers a tool or a list of tools with the branch's tool manager.
|
152
|
+
|
153
|
+
This makes the tools available for use within the conversation.
|
140
154
|
|
141
155
|
Args:
|
142
|
-
tools (Union[Tool, List[Tool]]):
|
156
|
+
tools (Union[Tool, List[Tool]]): A single Tool instance or a list of Tool instances to be registered.
|
143
157
|
|
144
158
|
Examples:
|
145
|
-
>>> tool = Tool(name="calculator")
|
146
159
|
>>> branch.register_tools(tool)
|
160
|
+
>>> branch.register_tools([tool1, tool2])
|
147
161
|
"""
|
148
162
|
if not isinstance(tools, list):
|
149
163
|
tools = [tools]
|
150
164
|
self.tool_manager.register_tools(tools=tools)
|
151
165
|
|
152
|
-
def delete_tool(self,
|
166
|
+
def delete_tool(self, tools: Union[Tool, List[Tool], str, List[str]], verbose=True) -> bool:
|
153
167
|
"""
|
154
|
-
|
168
|
+
Deletes one or more tools from the branch's tool manager registry.
|
169
|
+
|
170
|
+
This can be done using either tool instances or their names.
|
155
171
|
|
156
172
|
Args:
|
157
|
-
|
173
|
+
tools (Union[Tool, List[Tool], str, List[str]]): A single Tool instance, a list of Tool instances, a tool name, or a list of tool names to be deleted.
|
174
|
+
verbose (bool): If True, prints a success message upon successful deletion. Defaults to True.
|
158
175
|
|
159
176
|
Returns:
|
160
|
-
bool: True if the tool
|
177
|
+
bool: True if the tool(s) were successfully deleted, False otherwise.
|
161
178
|
|
162
179
|
Examples:
|
163
|
-
>>> branch.delete_tool("
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
180
|
+
>>> branch.delete_tool("tool_name")
|
181
|
+
>>> branch.delete_tool(["tool_name1", "tool_name2"])
|
182
|
+
>>> branch.delete_tool(tool_instance)
|
183
|
+
>>> branch.delete_tool([tool_instance1, tool_instance2])
|
184
|
+
"""
|
185
|
+
if isinstance(tools, list):
|
186
|
+
if is_same_dtype(tools, str):
|
187
|
+
for tool in tools:
|
188
|
+
if tool in self.tool_manager.registry:
|
189
|
+
self.tool_manager.registry.pop(tool)
|
190
|
+
if verbose:
|
191
|
+
print("tools successfully deleted")
|
192
|
+
return True
|
193
|
+
elif is_same_dtype(tools, Tool):
|
194
|
+
for tool in tools:
|
195
|
+
if tool.name in self.tool_manager.registry:
|
196
|
+
self.tool_manager.registry.pop(tool.name)
|
197
|
+
if verbose:
|
198
|
+
print("tools successfully deleted")
|
199
|
+
return True
|
200
|
+
if verbose:
|
201
|
+
print("tools deletion failed")
|
169
202
|
return False
|
170
203
|
|
204
|
+
# ----- branch manipulation ----- #
|
171
205
|
def clone(self) -> 'Branch':
|
172
206
|
"""
|
173
|
-
|
207
|
+
Creates a deep copy of the current Branch instance.
|
208
|
+
|
209
|
+
This method duplicates the Branch's state, including its messages, instruction sets, and tool registrations, but creates a new ToolManager instance for the cloned branch.
|
174
210
|
|
175
211
|
Returns:
|
176
|
-
Branch: A new Branch
|
212
|
+
Branch: A new Branch instance that is a deep copy of the current instance.
|
177
213
|
|
178
214
|
Examples:
|
179
215
|
>>> cloned_branch = branch.clone()
|
180
216
|
"""
|
181
217
|
cloned = Branch(
|
182
|
-
dir = self.logger.dir,
|
183
218
|
messages=self.messages.copy(),
|
184
219
|
instruction_sets=self.instruction_sets.copy(),
|
185
220
|
tool_manager=ToolManager()
|
@@ -193,14 +228,18 @@ class Branch(Conversation):
|
|
193
228
|
|
194
229
|
def merge_branch(self, branch: 'Branch', update: bool = True):
|
195
230
|
"""
|
196
|
-
|
231
|
+
Merges another branch into the current Branch instance.
|
232
|
+
|
233
|
+
Incorporates messages, instruction sets, and tool registrations from the specified branch. Optionally updates existing instruction sets and tools if duplicates are found.
|
197
234
|
|
198
235
|
Args:
|
199
|
-
branch (Branch): The
|
200
|
-
update (bool): If True,
|
201
|
-
otherwise only add non-existing ones.
|
236
|
+
branch (Branch): The branch to merge into the current branch.
|
237
|
+
update (bool): If True, existing instruction sets and tools are updated with those from the merged branch. Defaults to True.
|
202
238
|
|
239
|
+
Examples:
|
240
|
+
>>> branch.merge_branch(another_branch)
|
203
241
|
"""
|
242
|
+
|
204
243
|
message_copy = branch.messages.copy()
|
205
244
|
self.messages = self.messages.merge(message_copy, how='outer')
|
206
245
|
|
@@ -218,136 +257,153 @@ class Branch(Conversation):
|
|
218
257
|
if key not in self.tool_manager.registry:
|
219
258
|
self.tool_manager.registry[key] = value
|
220
259
|
|
221
|
-
|
222
|
-
|
260
|
+
|
261
|
+
# ----- intra-branch communication methods ----- #
|
262
|
+
def send(self, to_name, title, package):
|
223
263
|
"""
|
224
|
-
|
264
|
+
Sends a request package to a specified recipient.
|
225
265
|
|
226
|
-
|
227
|
-
|
266
|
+
Packages are queued in `pending_outs` for dispatch. The function doesn't immediately send the package but prepares it for delivery.
|
267
|
+
|
268
|
+
Args:
|
269
|
+
to_name (str): The name of the recipient branch.
|
270
|
+
title (str): The title or category of the request (e.g., 'messages', 'tool', 'service', 'llmconfig').
|
271
|
+
package (Any): The actual data or object to be sent, its expected type depends on the title.
|
228
272
|
|
229
273
|
Examples:
|
230
|
-
>>>
|
231
|
-
>>>
|
232
|
-
0
|
274
|
+
>>> branch.send("another_branch", "messages", message_dataframe)
|
275
|
+
>>> branch.send("service_branch", "service", service_config)
|
233
276
|
"""
|
234
|
-
|
235
|
-
|
236
|
-
"summary_by_role": self.info(),
|
237
|
-
"summary_by_sender": self.info(use_sender=True),
|
238
|
-
"instruction_sets": self.instruction_sets,
|
239
|
-
"registered_tools": self.tool_manager.registry,
|
240
|
-
"messages": [
|
241
|
-
msg.to_dict() for _, msg in self.messages.iterrows()
|
242
|
-
],
|
243
|
-
}
|
277
|
+
request = Request(from_name=self.name, to_name=to_name, title=title, request=package)
|
278
|
+
self.pending_outs.append(request)
|
244
279
|
|
245
|
-
def
|
280
|
+
def receive(self, from_name, messages=True, tool=True, service=True, llmconfig=True):
|
246
281
|
"""
|
247
|
-
|
282
|
+
Processes and integrates received request packages based on their titles.
|
248
283
|
|
249
|
-
|
250
|
-
|
284
|
+
Handles incoming requests by updating the branch's state with the received data. It can selectively process requests based on the type specified by the `title` of the request.
|
285
|
+
|
286
|
+
Args:
|
287
|
+
from_name (str): The name of the sender whose packages are to be processed.
|
288
|
+
messages (bool): If True, processes 'messages' requests. Defaults to True.
|
289
|
+
tool (bool): If True, processes 'tool' requests. Defaults to True.
|
290
|
+
service (bool): If True, processes 'service' requests. Defaults to True.
|
291
|
+
llmconfig (bool): If True, processes 'llmconfig' requests. Defaults to True.
|
292
|
+
|
293
|
+
Raises:
|
294
|
+
ValueError: If no package is found from the specified sender, or if any of the packages have an invalid format.
|
251
295
|
|
252
296
|
Examples:
|
253
|
-
>>>
|
297
|
+
>>> branch.receive("another_branch")
|
254
298
|
"""
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
if isinstance(content_, str):
|
261
|
-
try:
|
262
|
-
content_ = json.dumps(as_dict(content_))
|
263
|
-
except Exception as e:
|
264
|
-
raise ValueError(f"Error in serealizing, {row['node_id']} {content_}: {e}")
|
265
|
-
|
266
|
-
out = {"role": row['role'], "content": content_}
|
267
|
-
message.append(out)
|
268
|
-
return message
|
299
|
+
skipped_requests = deque()
|
300
|
+
if from_name not in self.pending_ins:
|
301
|
+
raise ValueError(f'No package from {from_name}')
|
302
|
+
while self.pending_ins[from_name]:
|
303
|
+
request = self.pending_ins[from_name].popleft()
|
269
304
|
|
270
|
-
|
305
|
+
if request.title == 'messages' and messages:
|
306
|
+
if not isinstance(request.request, pd.DataFrame):
|
307
|
+
raise ValueError('Invalid messages format')
|
308
|
+
validate_messages(request.request)
|
309
|
+
self.messages = self.messages.merge(request.request, how='outer')
|
310
|
+
continue
|
311
|
+
|
312
|
+
elif request.title == 'tool' and tool:
|
313
|
+
if not isinstance(request.request, Tool):
|
314
|
+
raise ValueError('Invalid tool format')
|
315
|
+
self.tool_manager.register_tools([request.request])
|
316
|
+
|
317
|
+
elif request.title == 'service' and service:
|
318
|
+
if not isinstance(request.request, BaseService):
|
319
|
+
raise ValueError('Invalid service format')
|
320
|
+
self.service = request.request
|
321
|
+
|
322
|
+
elif request.title == 'llmconfig' and llmconfig:
|
323
|
+
if not isinstance(request.request, dict):
|
324
|
+
raise ValueError('Invalid llmconfig format')
|
325
|
+
self.llmconfig.update(request.request)
|
326
|
+
|
327
|
+
else:
|
328
|
+
skipped_requests.append(request)
|
329
|
+
|
330
|
+
self.pending_ins[from_name] = skipped_requests
|
331
|
+
|
332
|
+
def receive_all(self):
|
271
333
|
"""
|
272
|
-
|
334
|
+
Processes all pending incoming requests from all senders.
|
273
335
|
|
274
|
-
|
275
|
-
bool: True if the conversation has been invoked, False otherwise.
|
336
|
+
This method iterates through all senders with pending requests and processes each using the `receive` method. It ensures that all queued incoming data is integrated into the branch's state.
|
276
337
|
|
338
|
+
Examples:
|
339
|
+
>>> branch.receive_all()
|
277
340
|
"""
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
return False
|
286
|
-
|
287
|
-
async def call_chatcompletion(self, **kwargs):
|
341
|
+
for key in list(self.pending_ins.keys()):
|
342
|
+
self.receive(key)
|
343
|
+
|
344
|
+
|
345
|
+
# ----- service methods ----- #
|
346
|
+
|
347
|
+
async def call_chatcompletion(self, sender=None, with_sender=False, **kwargs):
|
288
348
|
"""
|
289
|
-
|
349
|
+
Asynchronously calls the chat completion service with the current message queue.
|
290
350
|
|
291
|
-
This method
|
292
|
-
with the response.
|
351
|
+
This method prepares the messages for chat completion, sends the request to the configured service, and handles the response. The method supports additional keyword arguments that are passed directly to the service.
|
293
352
|
|
294
353
|
Args:
|
295
|
-
|
354
|
+
sender (Optional[str]): The name of the sender to be included in the chat completion request. Defaults to None.
|
355
|
+
with_sender (bool): If True, includes the sender's name in the messages. Defaults to False.
|
356
|
+
**kwargs: Arbitrary keyword arguments passed directly to the chat completion service.
|
296
357
|
|
358
|
+
Examples:
|
359
|
+
>>> await branch.call_chatcompletion()
|
297
360
|
"""
|
298
|
-
messages = self.
|
299
|
-
payload, completion = await self.service.serve_chat(
|
361
|
+
messages = self.chat_messages if not with_sender else self.chat_messages_with_sender
|
362
|
+
payload, completion = await self.service.serve_chat(
|
363
|
+
messages=messages, **kwargs)
|
300
364
|
if "choices" in completion:
|
301
|
-
|
302
|
-
|
365
|
+
add_msg_config = {"response":completion['choices'][0]}
|
366
|
+
if sender is not None:
|
367
|
+
add_msg_config["sender"] = sender
|
368
|
+
|
369
|
+
self.add_message(**add_msg_config)
|
303
370
|
self.status_tracker.num_tasks_succeeded += 1
|
304
371
|
else:
|
305
372
|
self.status_tracker.num_tasks_failed += 1
|
306
373
|
|
307
|
-
|
308
|
-
def has_tools(self) -> bool:
|
309
|
-
"""
|
310
|
-
Check if there are any tools registered in the tool manager.
|
311
|
-
|
312
|
-
Returns:
|
313
|
-
bool: True if there are tools registered, False otherwise.
|
314
|
-
|
315
|
-
"""
|
316
|
-
return self.tool_manager.registry != {}
|
374
|
+
# ----- chat methods ----- #
|
317
375
|
|
318
376
|
async def chat(
|
319
377
|
self,
|
320
378
|
instruction: Union[Instruction, str],
|
321
|
-
system: Optional[Union[System, str, Dict[str, Any]]] = None,
|
322
379
|
context: Optional[Any] = None,
|
323
|
-
out: bool = True,
|
324
380
|
sender: Optional[str] = None,
|
325
|
-
|
381
|
+
system: Optional[Union[System, str, Dict[str, Any]]] = None,
|
326
382
|
tools: Union[bool, Tool, List[Tool], str, List[str]] = False,
|
383
|
+
out: bool = True,
|
384
|
+
invoke: bool = True,
|
327
385
|
**kwargs
|
328
386
|
) -> Any:
|
329
387
|
"""
|
330
|
-
|
388
|
+
Asynchronously handles a chat interaction within the branch.
|
331
389
|
|
332
|
-
This method
|
333
|
-
and performs tool invocations if specified.
|
390
|
+
This method adds a new message based on the provided instruction, optionally using specified tools, and processes the chat completion.
|
334
391
|
|
335
392
|
Args:
|
336
|
-
instruction (Union[Instruction, str]): The
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
**kwargs:
|
393
|
+
instruction (Union[Instruction, str]): The instruction or query to process.
|
394
|
+
context (Optional[Any]): Additional context for the chat completion request. Defaults to None.
|
395
|
+
sender (Optional[str]): The name of the sender. Defaults to None.
|
396
|
+
system (Optional[Union[System, str, Dict[str, Any]]]): System message or configuration. Defaults to None.
|
397
|
+
tools (Union[bool, Tool, List[Tool], str, List[str]]): Specifies if and which tools to use in the chat. Defaults to False.
|
398
|
+
out (bool): If True, the output of the chat completion is returned. Defaults to True.
|
399
|
+
invoke (bool): If True, invokes any action as determined by the chat completion. Defaults to True.
|
400
|
+
**kwargs: Arbitrary keyword arguments for further customization.
|
344
401
|
|
345
402
|
Returns:
|
346
|
-
Any: The
|
403
|
+
Any: The result of the chat interaction, which could be varied based on the input and configuration.
|
347
404
|
|
348
405
|
Examples:
|
349
|
-
>>> result = await branch.chat("
|
350
|
-
>>> print(result)
|
406
|
+
>>> result = await branch.chat("How's the weather?")
|
351
407
|
"""
|
352
408
|
|
353
409
|
if system:
|
@@ -363,6 +419,9 @@ class Branch(Conversation):
|
|
363
419
|
kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)
|
364
420
|
|
365
421
|
config = {**self.llmconfig, **kwargs}
|
422
|
+
if sender is not None:
|
423
|
+
config.update({"sender": sender})
|
424
|
+
|
366
425
|
await self.call_chatcompletion(**config)
|
367
426
|
|
368
427
|
async def _output():
|
@@ -376,7 +435,6 @@ class Branch(Conversation):
|
|
376
435
|
)
|
377
436
|
|
378
437
|
outs = await alcall(func_calls, self.tool_manager.invoke)
|
379
|
-
|
380
438
|
outs = to_list(outs, flatten=True)
|
381
439
|
|
382
440
|
for out_, f in zip(outs, func_calls):
|
@@ -400,116 +458,310 @@ class Branch(Conversation):
|
|
400
458
|
|
401
459
|
return await _output()
|
402
460
|
|
461
|
+
async def ReAct(
|
462
|
+
self,
|
463
|
+
instruction: Union[Instruction, str],
|
464
|
+
context = None,
|
465
|
+
sender = None,
|
466
|
+
system = None,
|
467
|
+
tools = None,
|
468
|
+
num_rounds: int = 1,
|
469
|
+
**kwargs
|
470
|
+
):
|
471
|
+
"""
|
472
|
+
Performs a sequence of reasoning and action based on the given instruction over multiple rounds.
|
473
|
+
|
474
|
+
In each round, the method reflects on the task, devises an action plan using available tools, and invokes the necessary tool usage to execute the plan.
|
475
|
+
|
476
|
+
Args:
|
477
|
+
instruction (Union[Instruction, str]): The initial task or question to start the reasoning and action process.
|
478
|
+
context: Optional context to influence the reasoning process. Defaults to None.
|
479
|
+
sender (Optional[str]): The name of the sender initiating the ReAct process. Defaults to None.
|
480
|
+
system: Optional system message or configuration to be considered during the process. Defaults to None.
|
481
|
+
tools: Specifies the tools to be considered for action plans. Defaults to None.
|
482
|
+
num_rounds (int): The number of reasoning-action rounds to be performed. Defaults to 1.
|
483
|
+
**kwargs: Arbitrary keyword arguments for further customization.
|
484
|
+
|
485
|
+
Returns:
|
486
|
+
The final output after completing the specified number of reasoning-action rounds.
|
487
|
+
|
488
|
+
Examples:
|
489
|
+
>>> await branch.ReAct("Prepare a report on recent sales trends.", num_rounds=2)
|
490
|
+
"""
|
491
|
+
if tools is not None:
|
492
|
+
if isinstance(tools, list) and isinstance(tools[0], Tool):
|
493
|
+
self.register_tools(tools)
|
494
|
+
|
495
|
+
if self.tool_manager.registry == {}:
|
496
|
+
raise ValueError("No tools found, You need to register tools for ReAct (reason-action)")
|
497
|
+
|
498
|
+
else:
|
499
|
+
kwargs = self.tool_manager._tool_parser(tools=True, **kwargs)
|
500
|
+
|
501
|
+
out = ''
|
502
|
+
i = 0
|
503
|
+
while i < num_rounds:
|
504
|
+
prompt = f"""you have {(num_rounds-i)*2} step left in current task. if available, integrate previous tool responses. perform reasoning and prepare action plan according to available tools only, apply divide and conquer technique.
|
505
|
+
"""
|
506
|
+
instruct = {"Notice": prompt}
|
507
|
+
|
508
|
+
if i == 0:
|
509
|
+
instruct["Task"] = instruction
|
510
|
+
out = await self.chat(
|
511
|
+
instruction=instruct, context=context,
|
512
|
+
system=system, sender=sender, **kwargs
|
513
|
+
)
|
514
|
+
|
515
|
+
elif i >0:
|
516
|
+
out = await self.chat(
|
517
|
+
instruction=instruct, sender=sender, **kwargs
|
518
|
+
)
|
519
|
+
|
520
|
+
prompt = f"""
|
521
|
+
you have {(num_rounds-i)*2-1} step left in current task, invoke tool usage to perform actions
|
522
|
+
"""
|
523
|
+
out = await self.chat(prompt, tool_choice="auto", tool_parsed=True, sender=sender, **kwargs)
|
524
|
+
|
525
|
+
i += 1
|
526
|
+
if not self._is_invoked():
|
527
|
+
return out
|
528
|
+
|
529
|
+
|
530
|
+
if self._is_invoked():
|
531
|
+
prompt = """
|
532
|
+
present the final result to user
|
533
|
+
"""
|
534
|
+
return await self.chat(prompt, sender=sender, tool_parsed=True, **kwargs)
|
535
|
+
else:
|
536
|
+
return out
|
537
|
+
|
538
|
+
# async def auto_ReAct(
|
539
|
+
# self,
|
540
|
+
# instruction: Union[Instruction, str],
|
541
|
+
# context = None,
|
542
|
+
# sender = None,
|
543
|
+
# system = None,
|
544
|
+
# tools = None,
|
545
|
+
# max_rounds: int = 1,
|
546
|
+
|
547
|
+
# fallback: Optional[Callable] = None,
|
548
|
+
# fallback_kwargs: Optional[Dict] = None,
|
549
|
+
# **kwargs
|
550
|
+
# ):
|
551
|
+
# if tools is not None:
|
552
|
+
# if isinstance(tools, list) and isinstance(tools[0], Tool):
|
553
|
+
# self.register_tools(tools)
|
554
|
+
|
555
|
+
# if self.tool_manager.registry == {}:
|
556
|
+
# raise ValueError("No tools found, You need to register tools for ReAct (reason-action)")
|
557
|
+
|
558
|
+
# else:
|
559
|
+
# kwargs = self.tool_manager._tool_parser(tools=True, **kwargs)
|
560
|
+
|
561
|
+
# i = 0
|
562
|
+
# while i < max_rounds:
|
563
|
+
# prompt = f"""
|
564
|
+
# you have {(max_rounds-i)*2} step left in current task. reflect, perform
|
565
|
+
# reason for action plan according to available tools only, apply divide and conquer technique, retain from invoking functions
|
566
|
+
# """
|
567
|
+
# instruct = {"Notice": prompt}
|
568
|
+
|
569
|
+
# if i == 0:
|
570
|
+
# instruct["Task"] = instruction
|
571
|
+
# await self.chat(
|
572
|
+
# instruction=instruct, context=context,
|
573
|
+
# system=system, out=False, sender=sender, **kwargs
|
574
|
+
# )
|
575
|
+
|
576
|
+
# elif i >0:
|
577
|
+
# await self.chat(
|
578
|
+
# instruction=instruct, out=False, sender=sender, **kwargs
|
579
|
+
# )
|
580
|
+
|
581
|
+
# prompt = f"""
|
582
|
+
# you have {(max_rounds-i)*2-1} step left in current task, invoke tool usage to perform the action
|
583
|
+
# """
|
584
|
+
# await self.chat(prompt, tool_choice="auto", tool_parsed=True, out=False,sender=sender, **kwargs)
|
585
|
+
|
586
|
+
# i += 1
|
587
|
+
|
588
|
+
# if self._is_invoked():
|
589
|
+
# if fallback is not None:
|
590
|
+
# if asyncio.iscoroutinefunction(fallback):
|
591
|
+
# return await fallback(**fallback_kwargs)
|
592
|
+
# else:
|
593
|
+
# return fallback(**fallback_kwargs)
|
594
|
+
# prompt = """
|
595
|
+
# present the final result to user
|
596
|
+
# """
|
597
|
+
# return await self.chat(prompt, sender=sender, tool_parsed=True, **kwargs)
|
598
|
+
|
403
599
|
async def auto_followup(
    self,
    instruction: Union[Instruction, str],
    context = None,
    sender = None,
    system = None,
    tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
    max_followup: int = 3,
    out=True,
    **kwargs
) -> None:

    """
    Automatically performs follow-up actions until a specified condition is met or the maximum number of follow-ups is reached.

    This method allows for iterative refinement and follow-up based on the instruction, using available tools and considering feedback from each step.

    Args:
        instruction (Union[Instruction, str]): The instruction to initiate the follow-up process.
        context: Optional context relevant to the follow-up actions. Defaults to None.
        sender (Optional[str]): The name of the sender. Defaults to None.
        system: Optional system configuration affecting the follow-up process. Defaults to None.
        tools (Union[bool, Tool, List[Tool], str, List[str], List[Dict]]): Specifies the tools to be used during follow-up actions. Defaults to False.
        max_followup (int): The maximum number of follow-up iterations. Defaults to 3.
        out (bool): If True, the final result is returned. Defaults to True.
        **kwargs: Arbitrary keyword arguments for additional customization.

    Returns:
        The final result after all follow-up actions are completed, if `out` is True;
        otherwise None. (NOTE(review): the `-> None` annotation understates this --
        the method does return chat results; kept for signature fidelity.)

    Examples:
        >>> await branch.auto_followup("Update the database with new entries.", max_followup=2)
    """

    # Resolve tool schemas into the chat kwargs once, up front.
    if self.tool_manager.registry != {} and tools:
        kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)

    n_tries = 0
    while (max_followup - n_tries) > 0:
        prompt = f"""
            In the current task you are allowed a maximum of another {max_followup-n_tries} followup chats.
            if further actions are needed, invoke tools usage. If you are done, present the final result
            to user without further tool usage
        """
        if n_tries > 0:
            _out = await self.chat(prompt, sender=sender, tool_choice="auto", tool_parsed=True, **kwargs)
            n_tries += 1

            # No tool call in the reply means the model is done -- stop early.
            if not self._is_invoked():
                return _out if out else None

        elif n_tries == 0:
            instruct = {"notice": prompt, "task": instruction}
            # BUG FIX: this result was previously assigned to `out`, shadowing the
            # `out` flag and leaving `_out` undefined, so the `return _out if out
            # else None` below raised NameError whenever the model answered without
            # invoking a tool on the first round.
            _out = await self.chat(
                instruct, context=context, system=system, sender=sender, tool_choice="auto",
                tool_parsed=True, **kwargs
            )
            n_tries += 1

            if not self._is_invoked():
                return _out if out else None

    # Follow-up budget exhausted while the model was still invoking tools:
    # ask once more for the final answer. (The original placed a no-op bare
    # string literal here; it is preserved as this comment instead:
    # "In the current task, you are at your last step, present the final result to user")
    if self._is_invoked():
        return await self.chat(instruction, sender=sender, tool_parsed=True, **kwargs)
|
451
666
|
|
452
|
-
|
453
|
-
|
454
|
-
|
455
|
-
|
456
|
-
|
457
|
-
|
458
|
-
|
667
|
+
# async def followup(
|
668
|
+
# self,
|
669
|
+
# instruction: Union[Instruction, str],
|
670
|
+
# context = None,
|
671
|
+
# sender = None,
|
672
|
+
# system = None,
|
673
|
+
# tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
|
674
|
+
# max_followup: int = 3,
|
675
|
+
# out=True,
|
676
|
+
# **kwargs
|
677
|
+
# ) -> None:
|
459
678
|
|
460
|
-
|
461
|
-
|
462
|
-
|
679
|
+
# """
|
680
|
+
# auto tool usages until LLM decides done. Then presents final results.
|
681
|
+
# """
|
463
682
|
|
464
|
-
|
465
|
-
|
466
|
-
|
467
|
-
|
468
|
-
|
469
|
-
|
683
|
+
# if self.tool_manager.registry != {} and tools:
|
684
|
+
# kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)
|
685
|
+
|
686
|
+
# n_tries = 0
|
687
|
+
# while (max_followup - n_tries) > 0:
|
688
|
+
# prompt = f"""
|
689
|
+
# In the current task you are allowed a maximum of another {max_followup-n_tries} followup chats.
|
690
|
+
# if further actions are needed, invoke tools usage. If you are done, present the final result
|
691
|
+
# to user without further tool usage.
|
692
|
+
# """
|
693
|
+
# if n_tries > 0:
|
694
|
+
# _out = await self.chat(prompt, sender=sender, tool_choice="auto", tool_parsed=True, **kwargs)
|
695
|
+
# n_tries += 1
|
696
|
+
|
697
|
+
# if not self._is_invoked():
|
698
|
+
# return _out if out else None
|
699
|
+
|
700
|
+
# elif n_tries == 0:
|
701
|
+
# instruct = {"notice": prompt, "task": instruction}
|
702
|
+
# out = await self.chat(
|
703
|
+
# instruct, context=context, system=system, sender=sender, tool_choice="auto",
|
704
|
+
# tool_parsed=True, **kwargs
|
705
|
+
# )
|
706
|
+
# n_tries += 1
|
707
|
+
|
708
|
+
# if not self._is_invoked():
|
709
|
+
# return _out if out else None
|
470
710
|
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
711
|
+
def _add_service(self, service, llmconfig):
|
712
|
+
service = service or OpenAIService()
|
713
|
+
self.service=service
|
714
|
+
if llmconfig:
|
715
|
+
self.llmconfig = llmconfig
|
716
|
+
else:
|
717
|
+
if isinstance(service, OpenAIService):
|
718
|
+
self.llmconfig = oai_schema["chat/completions"]["config"]
|
719
|
+
elif isinstance(service, OpenRouterService):
|
720
|
+
self.llmconfig = openrouter_schema["chat/completions"]["config"]
|
721
|
+
else:
|
722
|
+
self.llmconfig = {}
|
479
723
|
|
480
|
-
if request.title == 'messages' and messages:
|
481
|
-
if not isinstance(request.request, pd.DataFrame):
|
482
|
-
raise ValueError('Invalid messages format')
|
483
|
-
validate_messages(request.request)
|
484
|
-
self.messages = self.messages.merge(request.request, how='outer')
|
485
|
-
continue
|
486
724
|
|
487
|
-
|
488
|
-
|
489
|
-
raise ValueError('Invalid tool format')
|
490
|
-
self.tool_manager.register_tools([request.request])
|
725
|
+
def _to_chatcompletion_message(self, with_sender=False) -> List[Dict[str, Any]]:
|
726
|
+
message = []
|
491
727
|
|
492
|
-
|
493
|
-
|
494
|
-
|
495
|
-
|
728
|
+
for _, row in self.messages.iterrows():
|
729
|
+
content_ = row['content']
|
730
|
+
if content_.startswith('Sender'):
|
731
|
+
content_ = content_.split(':', 1)[1]
|
732
|
+
|
733
|
+
if isinstance(content_, str):
|
734
|
+
try:
|
735
|
+
content_ = json.dumps(as_dict(content_))
|
736
|
+
except Exception as e:
|
737
|
+
raise ValueError(f"Error in serealizing, {row['node_id']} {content_}: {e}")
|
738
|
+
|
739
|
+
out = {"role": row['role'], "content": content_}
|
740
|
+
if with_sender:
|
741
|
+
out['content'] = f"Sender {row['sender']}: {content_}"
|
742
|
+
|
743
|
+
message.append(out)
|
744
|
+
return message
|
496
745
|
|
497
|
-
elif request.title == 'llmconfig' and llmconfig:
|
498
|
-
if not isinstance(request.request, dict):
|
499
|
-
raise ValueError('Invalid llmconfig format')
|
500
|
-
self.llmconfig.update(request.request)
|
501
746
|
|
502
|
-
|
503
|
-
|
747
|
+
def _is_invoked(self) -> bool:
|
748
|
+
"""
|
749
|
+
Check if the conversation has been invoked with an action response.
|
504
750
|
|
505
|
-
|
751
|
+
Returns:
|
752
|
+
bool: True if the conversation has been invoked, False otherwise.
|
506
753
|
|
507
|
-
def receive_all(self):
|
508
|
-
"""
|
509
|
-
Process all pending incoming requests from all senders.
|
510
754
|
"""
|
511
|
-
|
512
|
-
|
755
|
+
content = self.messages.iloc[-1]['content']
|
756
|
+
try:
|
757
|
+
if (
|
758
|
+
as_dict(content)['action_response'].keys() >= {'function', 'arguments', 'output'}
|
759
|
+
):
|
760
|
+
return True
|
761
|
+
except:
|
762
|
+
return False
|
763
|
+
|
764
|
+
|
513
765
|
|
514
766
|
|
515
767
|
# def add_instruction_set(self, name: str, instruction_set: InstructionSet):
|