lionagi 0.0.201__py3-none-any.whl → 0.0.204__py3-none-any.whl
Listing of files changed between the lionagi 0.0.201 and 0.0.204 wheels, with per-file added/removed line counts.
- lionagi/_services/anthropic.py +79 -1
- lionagi/_services/base_service.py +1 -1
- lionagi/_services/services.py +61 -25
- lionagi/_services/transformers.py +46 -0
- lionagi/agents/__init__.py +0 -0
- lionagi/configs/oai_configs.py +1 -1
- lionagi/configs/openrouter_configs.py +1 -1
- lionagi/core/__init__.py +3 -7
- lionagi/core/branch/__init__.py +0 -0
- lionagi/core/branch/branch.py +589 -0
- lionagi/core/branch/branch_manager.py +139 -0
- lionagi/core/branch/cluster.py +1 -0
- lionagi/core/branch/conversation.py +484 -0
- lionagi/core/core_util.py +59 -0
- lionagi/core/flow/__init__.py +0 -0
- lionagi/core/flow/flow.py +19 -0
- lionagi/core/instruction_set/__init__.py +0 -0
- lionagi/core/instruction_set/instruction_set.py +343 -0
- lionagi/core/messages/__init__.py +0 -0
- lionagi/core/messages/messages.py +176 -0
- lionagi/core/sessions/__init__.py +0 -0
- lionagi/core/sessions/session.py +428 -0
- lionagi/models/__init__.py +0 -0
- lionagi/models/base_model.py +0 -0
- lionagi/models/imodel.py +53 -0
- lionagi/schema/data_logger.py +75 -155
- lionagi/tests/test_utils/test_call_util.py +658 -657
- lionagi/tools/tool_manager.py +121 -188
- lionagi/utils/__init__.py +5 -10
- lionagi/utils/call_util.py +667 -585
- lionagi/utils/io_util.py +3 -0
- lionagi/utils/nested_util.py +17 -211
- lionagi/utils/pd_util.py +57 -0
- lionagi/utils/sys_util.py +220 -184
- lionagi/utils/url_util.py +55 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.201.dist-info → lionagi-0.0.204.dist-info}/METADATA +12 -8
- {lionagi-0.0.201.dist-info → lionagi-0.0.204.dist-info}/RECORD +47 -32
- lionagi/core/branch.py +0 -193
- lionagi/core/conversation.py +0 -341
- lionagi/core/flow.py +0 -8
- lionagi/core/instruction_set.py +0 -150
- lionagi/core/messages.py +0 -243
- lionagi/core/sessions.py +0 -474
- /lionagi/{tools → agents}/planner.py +0 -0
- /lionagi/{tools → agents}/prompter.py +0 -0
- /lionagi/{tools → agents}/scorer.py +0 -0
- /lionagi/{tools → agents}/summarizer.py +0 -0
- /lionagi/{tools → agents}/validator.py +0 -0
- /lionagi/core/{flow_util.py → flow/flow_util.py} +0 -0
- {lionagi-0.0.201.dist-info → lionagi-0.0.204.dist-info}/LICENSE +0 -0
- {lionagi-0.0.201.dist-info → lionagi-0.0.204.dist-info}/WHEEL +0 -0
- {lionagi-0.0.201.dist-info → lionagi-0.0.204.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,589 @@
|
|
1
|
+
import json
|
2
|
+
|
3
|
+
import pandas as pd
|
4
|
+
|
5
|
+
from typing import Any, Callable, Dict, List, Optional, Union
|
6
|
+
from collections import deque
|
7
|
+
import asyncio
|
8
|
+
from dotenv import load_dotenv
|
9
|
+
|
10
|
+
from lionagi.utils import as_dict, get_flattened_keys, lcall
|
11
|
+
from lionagi.schema import Tool
|
12
|
+
from lionagi._services.base_service import StatusTracker, BaseService
|
13
|
+
from lionagi._services.oai import OpenAIService
|
14
|
+
from lionagi._services.openrouter import OpenRouterService
|
15
|
+
from lionagi.configs.oai_configs import oai_schema
|
16
|
+
from lionagi.configs.openrouter_configs import openrouter_schema
|
17
|
+
from lionagi.tools.tool_manager import ToolManager
|
18
|
+
|
19
|
+
from ..messages.messages import Instruction, System
|
20
|
+
from ..instruction_set.instruction_set import InstructionSet
|
21
|
+
|
22
|
+
from .conversation import Conversation
|
23
|
+
from .branch_manager import Request
|
24
|
+
from ..core_util import validate_messages
|
25
|
+
|
26
|
+
# Load environment variables (e.g. API keys) from a local .env file.
load_dotenv()

# Module-level default service; shared by every Branch that does not pass
# its own `service` explicitly.
oai_service = OpenAIService()
|
29
|
+
|
30
|
+
class Branch(Conversation):
    """
    A conversation branch that pairs a message store with instruction sets
    and a tool registry.

    A Branch extends Conversation with a chat-completion service binding,
    language-model configuration, and inter-branch request queues
    (``pending_ins`` / ``pending_outs``) used by ``send`` / ``receive``.

    Attributes:
        messages (pd.DataFrame): Conversation messages with columns
            node_id, role, sender, timestamp, content.
        instruction_sets (Dict[str, InstructionSet]): Instruction sets keyed by name.
        tool_manager (ToolManager): Registry and invoker for tools.
        service: Chat-completion service (the shared OpenAIService by default).
        status_tracker (StatusTracker): Counters for succeeded/failed service calls.
        llmconfig (Dict): Configuration passed to the language-model service.
        name (Optional[str]): This branch's name; used as the sender of requests.
        pending_ins (Dict): Incoming request queues keyed by sender name.
        pending_outs (deque): Outgoing requests queued by ``send``.

    Examples:
        >>> branch = Branch(dir="path/to/log")          # doctest: +SKIP
        >>> branch.register_tools(Tool(name="calc"))    # doctest: +SKIP
        >>> branch.messages_describe                    # property, not a method
    """

    def __init__(
        self,
        name: Optional[str] = None,
        # NOTE: `dir` shadows the builtin; kept for API compatibility.
        dir: Optional[str] = None,
        messages: Optional[pd.DataFrame] = None,
        instruction_sets: Optional[Dict[str, InstructionSet]] = None,
        tool_manager: Optional[ToolManager] = None,
        # The module-level default instance is shared across branches.
        service: OpenAIService = oai_service,
        llmconfig: Optional[Dict] = None,
    ):
        """
        Initialize a Branch.

        Args:
            name: Branch name, used when routing requests between branches.
            dir: Log directory, forwarded to the Conversation base class.
            messages: Existing message DataFrame; an empty one with the
                standard columns is created when omitted.
            instruction_sets: Instruction sets keyed by name.
            tool_manager: Tool manager; a fresh ToolManager when omitted.
            service: Chat-completion service; defaults to the shared
                module-level OpenAIService.
            llmconfig: LLM configuration; when omitted it is derived from
                the service type (OpenAI / OpenRouter schema) or left empty.
        """
        super().__init__(dir)
        self.messages = (
            messages
            if messages is not None
            else pd.DataFrame(
                columns=["node_id", "role", "sender", "timestamp", "content"]
            )
        )
        self.instruction_sets = instruction_sets if instruction_sets else {}
        self.tool_manager = tool_manager if tool_manager else ToolManager()

        # Only falsy when the caller explicitly passes service=None.
        self.service = service if service else oai_service
        self.status_tracker = StatusTracker()
        if llmconfig:
            self.llmconfig = llmconfig
        else:
            # Derive a default config from the concrete service type.
            if isinstance(service, OpenAIService):
                self.llmconfig = oai_schema["chat/completions"]["config"]
            elif isinstance(service, OpenRouterService):
                self.llmconfig = openrouter_schema["chat/completions"]["config"]
            else:
                self.llmconfig = {}

        self.name = name
        self.pending_ins = {}
        self.pending_outs = deque()
|
104
|
+
|
105
|
+
def change_first_system_message(
    self, system: Union[str, Dict[str, Any], System], sender: Optional[str] = None
):
    """
    Replace the first system message of the conversation.

    Args:
        system: Replacement message — a string, a dict, or a System object.
        sender: Optional sender recorded on the replacement message.

    Raises:
        ValueError: If no system message exists yet, or if `system` cannot
            be converted into a System message.
    """
    existing = self.messages[self.messages.role == 'system']
    if len(existing) == 0:
        raise ValueError("There is no system message in the messages.")

    # Coerce raw strings / dicts into a System message first.
    if isinstance(system, (str, Dict)):
        system = System(system, sender=sender)
    if not isinstance(system, System):
        raise ValueError("Input cannot be converted into a system message.")

    updated = system.to_dict()
    if sender:
        updated['sender'] = sender
    updated['timestamp'] = str(pd.Timestamp.now())
    first_system_idx = self.messages[self.messages.role == 'system'].index[0]
    self.messages.loc[first_system_idx] = updated
|
136
|
+
|
137
|
+
def register_tools(self, tools: Union[Tool, List[Tool]]):
    """
    Register one or more tools with the branch's tool manager.

    Args:
        tools: A single Tool or a list of Tools to register.
    """
    # Normalize a single tool to a one-element list before delegating.
    tool_list = tools if isinstance(tools, list) else [tools]
    self.tool_manager.register_tools(tools=tool_list)
|
151
|
+
|
152
|
+
def delete_tool(self, name: str) -> bool:
    """
    Remove a tool from the tool manager's registry.

    Args:
        name: Name of the registered tool.

    Returns:
        bool: True if the tool existed and was removed, False otherwise.
    """
    # EAFP: attempt the deletion and report whether it succeeded.
    try:
        del self.tool_manager.registry[name]
    except KeyError:
        return False
    return True
|
170
|
+
|
171
|
+
def clone(self) -> 'Branch':
    """
    Create an independent copy of this branch.

    Messages and instruction sets are shallow-copied; tools are re-registered
    into a brand-new ToolManager so the registries are independent.

    Returns:
        Branch: The cloned branch.
    """
    replica = Branch(
        dir=self.logger.dir,
        messages=self.messages.copy(),
        instruction_sets=self.instruction_sets.copy(),
        tool_manager=ToolManager(),
    )
    # Re-register the same Tool objects into the fresh manager.
    replica.register_tools(list(self.tool_manager.registry.values()))
    return replica
|
193
|
+
|
194
|
+
def merge_branch(self, branch: 'Branch', update: bool = True):
    """
    Merge another branch's messages, instruction sets, and tools into this one.

    Args:
        branch: The branch to merge in.
        update: When True, incoming instruction sets and tools overwrite
            existing entries; when False, only missing keys are added.
    """
    # Outer-merge keeps rows from both branches.
    self.messages = self.messages.merge(branch.messages.copy(), how='outer')

    if update:
        self.instruction_sets.update(branch.instruction_sets)
        self.tool_manager.registry.update(branch.tool_manager.registry)
    else:
        # Keep existing entries; only fill in keys we don't have yet.
        for name, instruction_set in branch.instruction_sets.items():
            self.instruction_sets.setdefault(name, instruction_set)
        for name, registered_tool in branch.tool_manager.registry.items():
            self.tool_manager.registry.setdefault(name, registered_tool)
|
220
|
+
|
221
|
+
@property
def messages_describe(self) -> Dict[str, Any]:
    """
    Summary of the branch: message counts, per-role/per-sender summaries,
    instruction sets, registered tools, and all messages as dicts.

    Returns:
        Dict[str, Any]: The description dictionary.
    """
    description = {
        "total_messages": len(self.messages),
        "summary_by_role": self.info(),
        "summary_by_sender": self.info(use_sender=True),
        "instruction_sets": self.instruction_sets,
        "registered_tools": self.tool_manager.registry,
        "messages": [row.to_dict() for _, row in self.messages.iterrows()],
    }
    return description
|
244
|
+
|
245
|
+
def to_chatcompletion_message(self) -> List[Dict[str, Any]]:
    """
    Convert the stored messages into OpenAI chat-completion format.

    String content has any leading "Sender...:" routing prefix stripped and
    is normalized to a JSON string; non-string content is passed through
    unchanged (previously calling .startswith on it raised AttributeError).

    Returns:
        List[Dict[str, Any]]: One {"role", "content"} dict per message.

    Raises:
        ValueError: If a string content cannot be serialized to JSON.
    """
    message = []
    for _, row in self.messages.iterrows():
        content_ = row['content']
        # Only strings need prefix-stripping and JSON normalization.
        if isinstance(content_, str):
            if content_.startswith('Sender'):
                content_ = content_.split(':', 1)[1]
            try:
                content_ = json.dumps(as_dict(content_))
            except Exception as e:
                # Fixed typo: "serealizing" -> "serializing".
                raise ValueError(
                    f"Error in serializing, {row['node_id']} {content_}: {e}"
                ) from e
        message.append({"role": row['role'], "content": content_})
    return message
|
269
|
+
|
270
|
+
def _is_invoked(self) -> bool:
|
271
|
+
"""
|
272
|
+
Check if the conversation has been invoked with an action response.
|
273
|
+
|
274
|
+
Returns:
|
275
|
+
bool: True if the conversation has been invoked, False otherwise.
|
276
|
+
|
277
|
+
"""
|
278
|
+
content = self.messages.iloc[-1]['content']
|
279
|
+
try:
|
280
|
+
if (
|
281
|
+
as_dict(content)['action_response'].keys() >= {'function', 'arguments', 'output'}
|
282
|
+
):
|
283
|
+
return True
|
284
|
+
except:
|
285
|
+
return False
|
286
|
+
|
287
|
+
async def call_chatcompletion(self, **kwargs):
    """
    Send the current messages to the configured chat-completion service.

    On a successful response (one containing 'choices'): log the exchange,
    append the first choice to the conversation, and bump the success
    counter. Otherwise bump the failure counter.

    Args:
        **kwargs: Extra parameters forwarded to the service call.
    """
    payload, completion = await self.service.serve_chat(
        messages=self.to_chatcompletion_message(), **kwargs
    )
    if "choices" not in completion:
        self.status_tracker.num_tasks_failed += 1
        return
    self.logger.add_entry({"input": payload, "output": completion})
    self.add_message(response=completion['choices'][0])
    self.status_tracker.num_tasks_succeeded += 1
|
306
|
+
|
307
|
+
@property
def has_tools(self) -> bool:
    """True when at least one tool is registered with the tool manager."""
    # A non-empty registry dict is truthy, so this matches `registry != {}`.
    return bool(self.tool_manager.registry)
|
317
|
+
|
318
|
+
async def chat(
    self,
    instruction: Union[Instruction, str],
    system: Optional[Union[System, str, Dict[str, Any]]] = None,
    context: Optional[Any] = None,
    out: bool = True,
    sender: Optional[str] = None,
    invoke: bool = True,
    tools: Union[bool, Tool, List[Tool], str, List[str]] = False,
    **kwargs
) -> Any:
    """
    Run one chat turn: record the instruction, call the completion service,
    and optionally invoke any tool calls found in the model's reply.

    Args:
        instruction: The instruction message for this turn.
        system: If given, replaces the first system message before the call.
        context: Extra context stored with the instruction message.
        out: If True, return the parsed content of the model's reply.
        sender: Sender recorded on the instruction message.
        invoke: If True, execute tool calls listed in the reply's
            'action_list' and append their results as messages.
        tools: Tool selection forwarded to the tool manager's parser;
            only used when tools are registered.
        **kwargs: Extra parameters merged over `self.llmconfig` and passed
            to the completion service. A 'tool_parsed' key signals that
            tool schemas were already parsed by the caller.

    Returns:
        Any: The reply content (unwrapped to the bare value when it holds a
        single entry), or None when `out` is False.
    """

    if system:
        self.change_first_system_message(system)
    self.add_message(instruction=instruction, context=context, sender=sender)

    if 'tool_parsed' in kwargs:
        # Caller already ran the tool parser: drop the flag and pass the
        # pre-parsed tool schemas through unchanged.
        kwargs.pop('tool_parsed')
        tool_kwarg = {'tools': tools}
        kwargs = {**tool_kwarg, **kwargs}
    else:
        if tools and self.has_tools:
            kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)

    # Per-call kwargs override the branch-level llmconfig.
    config = {**self.llmconfig, **kwargs}
    await self.call_chatcompletion(**config)

    async def _output():
        content_ = as_dict(self.messages.content.iloc[-1])
        if invoke:
            # Best-effort: replies without a usable 'action_list' simply
            # skip tool invocation.
            try:
                tool_uses = content_
                func_calls = lcall(
                    [as_dict(i) for i in tool_uses["action_list"]],
                    self.tool_manager.get_function_call
                )

                # outs = await alcall(func_calls, self.tool_manager.invoke)

                # Run all requested tool calls concurrently.
                tasks = [self.tool_manager.invoke(i) for i in func_calls]
                outs = await asyncio.gather(*tasks)
                for out_, f in zip(outs, func_calls):
                    self.add_message(
                        response={
                            "function": f[0],
                            "arguments": f[1],
                            "output": out_
                        }
                    )
            except:
                pass
        if out:
            # Unwrap single-key replies to their bare value.
            if (
                len(content_.items()) == 1
                and len(get_flattened_keys(content_)) == 1
            ):
                key = get_flattened_keys(content_)[0]
                return content_[key]
            return content_

    return await _output()
|
402
|
+
|
403
|
+
async def auto_followup(
    self,
    instruction: Union[Instruction, str],
    num: int = 3,
    tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
    fallback: Optional[Callable] = None,
    fallback_kwargs: Optional[Dict] = None,
    **kwargs
) -> None:
    """
    Repeat chat turns while the model keeps invoking tools, up to `num` times.

    When the follow-up budget is exhausted, the optional `fallback` is called
    (awaited if it is a coroutine function) and its result returned; otherwise
    a final chat turn is run and its output returned.

    Args:
        instruction: The chat instruction to process on each turn.
        num: Maximum number of follow-up chat turns.
        tools: Tool selection forwarded to the tool manager's parser.
        fallback: Called when the budget is exhausted.
        fallback_kwargs: Keyword arguments for `fallback`; may be omitted.
        **kwargs: Extra parameters for the chat completion service.

    Returns:
        The fallback's result or the final chat output (despite the
        historical -> None annotation, kept for interface stability).
    """
    if self.tool_manager.registry != {} and tools:
        kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)

    # Fix: the original unpacked **fallback_kwargs even when it was None,
    # raising TypeError whenever a fallback was supplied without kwargs.
    fallback_kwargs = fallback_kwargs or {}

    cont_ = True
    while num > 0 and cont_ is True:
        if tools:
            await self.chat(instruction, tool_choice="auto", tool_parsed=True, out=False, **kwargs)
        else:
            await self.chat(instruction, tool_parsed=True, out=False, **kwargs)
        num -= 1
        # Continue only while the model keeps producing action responses.
        cont_ = True if self._is_invoked() else False
    if num == 0:
        if fallback is not None:
            if asyncio.iscoroutinefunction(fallback):
                return await fallback(**fallback_kwargs)
            else:
                return fallback(**fallback_kwargs)
    return await self.chat(instruction, tool_parsed=True, **kwargs)
|
447
|
+
|
448
|
+
def send(self, to_name, title, package):
    """
    Queue an outgoing request package for another branch.

    Args:
        to_name (str): Name of the recipient branch.
        title (str): Package category ('messages', 'tool', 'service',
            'llmconfig'); determines the expected payload type.
        package (Any): The payload to deliver.
    """
    outgoing = Request(
        from_name=self.name, to_name=to_name, title=title, request=package
    )
    self.pending_outs.append(outgoing)
|
459
|
+
|
460
|
+
def receive(self, from_name, messages=True, tool=True, service=True, llmconfig=True):
    """
    Integrate pending request packages queued by one sender.

    The sender's queue is drained in FIFO order; packages whose title is
    disabled by the flags (or unrecognized) are kept for a later call.

    Args:
        from_name (str): Sender whose queued packages are processed.
        messages (bool, optional): Process 'messages' packages (outer-merge
            the DataFrame into self.messages).
        tool (bool, optional): Process 'tool' packages (register the tool).
        service (bool, optional): Process 'service' packages (replace
            self.service).
        llmconfig (bool, optional): Process 'llmconfig' packages (update
            self.llmconfig in place).

    Raises:
        ValueError: If the sender has no queue, or a package payload has the
            wrong type for its title.
    """
    # Packages we are not allowed (or able) to process get re-queued.
    skipped_requests = deque()
    if from_name not in self.pending_ins:
        raise ValueError(f'No package from {from_name}')
    while self.pending_ins[from_name]:
        request = self.pending_ins[from_name].popleft()

        if request.title == 'messages' and messages:
            if not isinstance(request.request, pd.DataFrame):
                raise ValueError('Invalid messages format')
            validate_messages(request.request)
            self.messages = self.messages.merge(request.request, how='outer')
            continue

        elif request.title == 'tool' and tool:
            if not isinstance(request.request, Tool):
                raise ValueError('Invalid tool format')
            self.tool_manager.register_tools([request.request])

        elif request.title == 'service' and service:
            if not isinstance(request.request, BaseService):
                raise ValueError('Invalid service format')
            self.service = request.request

        elif request.title == 'llmconfig' and llmconfig:
            if not isinstance(request.request, dict):
                raise ValueError('Invalid llmconfig format')
            self.llmconfig.update(request.request)

        else:
            skipped_requests.append(request)

    # Replace the queue with whatever was skipped this pass.
    self.pending_ins[from_name] = skipped_requests
|
506
|
+
|
507
|
+
def receive_all(self):
    """Process pending incoming requests from every known sender."""
    # Snapshot the keys: receive() mutates self.pending_ins while we iterate.
    for sender_name in list(self.pending_ins.keys()):
        self.receive(sender_name)
|
513
|
+
|
514
|
+
|
515
|
+
# def add_instruction_set(self, name: str, instruction_set: InstructionSet):
|
516
|
+
# """
|
517
|
+
# Add an instruction set to the conversation.
|
518
|
+
#
|
519
|
+
# Args:
|
520
|
+
# name (str): The name of the instruction set.
|
521
|
+
# instruction_set (InstructionSet): The instruction set to add.
|
522
|
+
#
|
523
|
+
# Examples:
|
524
|
+
# >>> branch.add_instruction_set("greet", InstructionSet(instructions=["Hello", "Hi"]))
|
525
|
+
# """
|
526
|
+
# self.instruction_sets[name] = instruction_set
|
527
|
+
|
528
|
+
# def remove_instruction_set(self, name: str) -> bool:
|
529
|
+
# """
|
530
|
+
# Remove an instruction set from the conversation.
|
531
|
+
#
|
532
|
+
# Args:
|
533
|
+
# name (str): The name of the instruction set to remove.
|
534
|
+
#
|
535
|
+
# Returns:
|
536
|
+
# bool: True if the instruction set was removed, False otherwise.
|
537
|
+
#
|
538
|
+
# Examples:
|
539
|
+
# >>> branch.remove_instruction_set("greet")
|
540
|
+
# True
|
541
|
+
# """
|
542
|
+
# return self.instruction_sets.pop(name)
|
543
|
+
|
544
|
+
# async def instruction_set_auto_followup(
|
545
|
+
# self,
|
546
|
+
# instruction_set: InstructionSet,
|
547
|
+
# num: Union[int, List[int]] = 3,
|
548
|
+
# **kwargs
|
549
|
+
# ) -> None:
|
550
|
+
# """
|
551
|
+
# Automatically perform follow-up chats for an entire instruction set.
|
552
|
+
#
|
553
|
+
# This method asynchronously conducts follow-up chats for each instruction in the provided instruction set,
|
554
|
+
# handling tool invocations as specified.
|
555
|
+
#
|
556
|
+
# Args:
|
557
|
+
# instruction_set (InstructionSet): The instruction set to process.
|
558
|
+
# num (Union[int, List[int]]): The maximum number of follow-up chats to perform for each instruction,
|
559
|
+
# or a list of maximum numbers corresponding to each instruction.
|
560
|
+
# **kwargs: Additional keyword arguments to pass to the chat completion service.
|
561
|
+
#
|
562
|
+
# Raises:
|
563
|
+
# ValueError: If the length of `num` as a list does not match the number of instructions in the set.
|
564
|
+
#
|
565
|
+
# Examples:
|
566
|
+
# >>> instruction_set = InstructionSet(instructions=["What's the weather?", "And for tomorrow?"])
|
567
|
+
# >>> await branch.instruction_set_auto_followup(instruction_set)
|
568
|
+
# """
|
569
|
+
#
|
570
|
+
# if isinstance(num, List):
|
571
|
+
# if len(num) != instruction_set.instruct_len:
|
572
|
+
# raise ValueError(
|
573
|
+
# 'Unmatched auto_followup num size and instructions set size'
|
574
|
+
# )
|
575
|
+
# current_instruct_node = instruction_set.get_instruction_by_id(
|
576
|
+
# instruction_set.first_instruct
|
577
|
+
# )
|
578
|
+
# for i in range(instruction_set.instruct_len):
|
579
|
+
# num_ = num if isinstance(num, int) else num[i]
|
580
|
+
# tools = instruction_set.get_tools(current_instruct_node)
|
581
|
+
# if tools:
|
582
|
+
# await self.auto_followup(
|
583
|
+
# current_instruct_node, num=num_, tools=tools, self=self, **kwargs
|
584
|
+
# )
|
585
|
+
# else:
|
586
|
+
# await self.chat(current_instruct_node)
|
587
|
+
# current_instruct_node = instruction_set.get_next_instruction(
|
588
|
+
# current_instruct_node
|
589
|
+
# )
|