lionagi 0.0.115__py3-none-any.whl → 0.0.204__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (123) hide show
  1. lionagi/__init__.py +1 -2
  2. lionagi/_services/__init__.py +5 -0
  3. lionagi/_services/anthropic.py +79 -0
  4. lionagi/_services/base_service.py +414 -0
  5. lionagi/_services/oai.py +98 -0
  6. lionagi/_services/openrouter.py +44 -0
  7. lionagi/_services/services.py +91 -0
  8. lionagi/_services/transformers.py +46 -0
  9. lionagi/bridge/langchain.py +26 -16
  10. lionagi/bridge/llama_index.py +50 -20
  11. lionagi/configs/oai_configs.py +2 -14
  12. lionagi/configs/openrouter_configs.py +2 -2
  13. lionagi/core/__init__.py +7 -8
  14. lionagi/core/branch/branch.py +589 -0
  15. lionagi/core/branch/branch_manager.py +139 -0
  16. lionagi/core/branch/conversation.py +484 -0
  17. lionagi/core/core_util.py +59 -0
  18. lionagi/core/flow/flow.py +19 -0
  19. lionagi/core/flow/flow_util.py +62 -0
  20. lionagi/core/instruction_set/__init__.py +0 -5
  21. lionagi/core/instruction_set/instruction_set.py +343 -0
  22. lionagi/core/messages/messages.py +176 -0
  23. lionagi/core/sessions/__init__.py +0 -5
  24. lionagi/core/sessions/session.py +428 -0
  25. lionagi/loaders/chunker.py +51 -47
  26. lionagi/loaders/load_util.py +2 -2
  27. lionagi/loaders/reader.py +45 -39
  28. lionagi/models/imodel.py +53 -0
  29. lionagi/schema/async_queue.py +158 -0
  30. lionagi/schema/base_node.py +318 -147
  31. lionagi/schema/base_tool.py +31 -1
  32. lionagi/schema/data_logger.py +74 -38
  33. lionagi/schema/data_node.py +57 -6
  34. lionagi/structures/graph.py +132 -10
  35. lionagi/structures/relationship.py +58 -20
  36. lionagi/structures/structure.py +36 -25
  37. lionagi/tests/test_utils/test_api_util.py +219 -0
  38. lionagi/tests/test_utils/test_call_util.py +785 -0
  39. lionagi/tests/test_utils/test_encrypt_util.py +323 -0
  40. lionagi/tests/test_utils/test_io_util.py +238 -0
  41. lionagi/tests/test_utils/test_nested_util.py +338 -0
  42. lionagi/tests/test_utils/test_sys_util.py +358 -0
  43. lionagi/tools/tool_manager.py +186 -0
  44. lionagi/tools/tool_util.py +266 -3
  45. lionagi/utils/__init__.py +21 -61
  46. lionagi/utils/api_util.py +359 -71
  47. lionagi/utils/call_util.py +839 -264
  48. lionagi/utils/encrypt_util.py +283 -16
  49. lionagi/utils/io_util.py +178 -93
  50. lionagi/utils/nested_util.py +672 -0
  51. lionagi/utils/pd_util.py +57 -0
  52. lionagi/utils/sys_util.py +284 -156
  53. lionagi/utils/url_util.py +55 -0
  54. lionagi/version.py +1 -1
  55. {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/METADATA +21 -17
  56. lionagi-0.0.204.dist-info/RECORD +106 -0
  57. lionagi/core/conversations/__init__.py +0 -5
  58. lionagi/core/conversations/conversation.py +0 -107
  59. lionagi/core/flows/__init__.py +0 -8
  60. lionagi/core/flows/flow.py +0 -8
  61. lionagi/core/flows/flow_util.py +0 -62
  62. lionagi/core/instruction_set/instruction_sets.py +0 -7
  63. lionagi/core/sessions/sessions.py +0 -185
  64. lionagi/endpoints/__init__.py +0 -5
  65. lionagi/endpoints/audio.py +0 -17
  66. lionagi/endpoints/chatcompletion.py +0 -54
  67. lionagi/messages/__init__.py +0 -11
  68. lionagi/messages/instruction.py +0 -15
  69. lionagi/messages/message.py +0 -110
  70. lionagi/messages/response.py +0 -33
  71. lionagi/messages/system.py +0 -12
  72. lionagi/objs/__init__.py +0 -11
  73. lionagi/objs/abc_objs.py +0 -39
  74. lionagi/objs/async_queue.py +0 -135
  75. lionagi/objs/messenger.py +0 -85
  76. lionagi/objs/tool_manager.py +0 -253
  77. lionagi/services/__init__.py +0 -11
  78. lionagi/services/base_api_service.py +0 -230
  79. lionagi/services/oai.py +0 -34
  80. lionagi/services/openrouter.py +0 -31
  81. lionagi/tests/test_api_util.py +0 -46
  82. lionagi/tests/test_call_util.py +0 -115
  83. lionagi/tests/test_convert_util.py +0 -202
  84. lionagi/tests/test_encrypt_util.py +0 -33
  85. lionagi/tests/test_flat_util.py +0 -426
  86. lionagi/tests/test_sys_util.py +0 -0
  87. lionagi/utils/convert_util.py +0 -229
  88. lionagi/utils/flat_util.py +0 -599
  89. lionagi-0.0.115.dist-info/RECORD +0 -110
  90. /lionagi/{services → _services}/anyscale.py +0 -0
  91. /lionagi/{services → _services}/azure.py +0 -0
  92. /lionagi/{services → _services}/bedrock.py +0 -0
  93. /lionagi/{services → _services}/everlyai.py +0 -0
  94. /lionagi/{services → _services}/gemini.py +0 -0
  95. /lionagi/{services → _services}/gpt4all.py +0 -0
  96. /lionagi/{services → _services}/huggingface.py +0 -0
  97. /lionagi/{services → _services}/litellm.py +0 -0
  98. /lionagi/{services → _services}/localai.py +0 -0
  99. /lionagi/{services → _services}/mistralai.py +0 -0
  100. /lionagi/{services → _services}/ollama.py +0 -0
  101. /lionagi/{services → _services}/openllm.py +0 -0
  102. /lionagi/{services → _services}/perplexity.py +0 -0
  103. /lionagi/{services → _services}/predibase.py +0 -0
  104. /lionagi/{services → _services}/rungpt.py +0 -0
  105. /lionagi/{services → _services}/vllm.py +0 -0
  106. /lionagi/{services → _services}/xinference.py +0 -0
  107. /lionagi/{endpoints/assistants.py → agents/__init__.py} +0 -0
  108. /lionagi/{tools → agents}/planner.py +0 -0
  109. /lionagi/{tools → agents}/prompter.py +0 -0
  110. /lionagi/{tools → agents}/scorer.py +0 -0
  111. /lionagi/{tools → agents}/summarizer.py +0 -0
  112. /lionagi/{tools → agents}/validator.py +0 -0
  113. /lionagi/{endpoints/embeddings.py → core/branch/__init__.py} +0 -0
  114. /lionagi/{services/anthropic.py → core/branch/cluster.py} +0 -0
  115. /lionagi/{endpoints/finetune.py → core/flow/__init__.py} +0 -0
  116. /lionagi/{endpoints/image.py → core/messages/__init__.py} +0 -0
  117. /lionagi/{endpoints/moderation.py → models/__init__.py} +0 -0
  118. /lionagi/{endpoints/vision.py → models/base_model.py} +0 -0
  119. /lionagi/{objs → schema}/status_tracker.py +0 -0
  120. /lionagi/tests/{test_io_util.py → test_utils/__init__.py} +0 -0
  121. {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/LICENSE +0 -0
  122. {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/WHEEL +0 -0
  123. {lionagi-0.0.115.dist-info → lionagi-0.0.204.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,589 @@
1
+ import json
2
+
3
+ import pandas as pd
4
+
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+ from collections import deque
7
+ import asyncio
8
+ from dotenv import load_dotenv
9
+
10
+ from lionagi.utils import as_dict, get_flattened_keys, lcall
11
+ from lionagi.schema import Tool
12
+ from lionagi._services.base_service import StatusTracker, BaseService
13
+ from lionagi._services.oai import OpenAIService
14
+ from lionagi._services.openrouter import OpenRouterService
15
+ from lionagi.configs.oai_configs import oai_schema
16
+ from lionagi.configs.openrouter_configs import openrouter_schema
17
+ from lionagi.tools.tool_manager import ToolManager
18
+
19
+ from ..messages.messages import Instruction, System
20
+ from ..instruction_set.instruction_set import InstructionSet
21
+
22
+ from .conversation import Conversation
23
+ from .branch_manager import Request
24
+ from ..core_util import validate_messages
25
+
26
+ load_dotenv()
27
+
28
+ oai_service = OpenAIService()
29
+
30
class Branch(Conversation):
    """
    A conversation branch with messages, instruction sets, and tool management.

    A `Branch` is a type of conversation that can hold messages, system
    instructions, and registered tools for interacting with external services.

    Attributes:
        name (Optional[str]): An optional identifier for this branch.
        messages (pd.DataFrame): Conversation messages with columns
            node_id, role, sender, timestamp, content.
        instruction_sets (Dict[str, InstructionSet]): Instruction sets keyed by name.
        tool_manager (ToolManager): Manages the tools registered on this branch.
        service (BaseService): Service used for chat completions
            (defaults to the module-level OpenAIService instance).
        status_tracker (StatusTracker): Tracks task success/failure counts.
        llmconfig (Dict): Configuration for the language model.
        pending_ins (Dict): Incoming request queues keyed by sender name.
        pending_outs (deque): Outgoing request queue.

    Examples:
        >>> branch = Branch(dir="path/to/log")
        >>> tool = Tool(name="calculator")
        >>> branch.register_tools(tool)
        >>> branch.messages_describe  # property, not a method call  # doctest: +SKIP
        {'total_messages': 0, 'summary_by_role': ..., 'summary_by_sender': ..., 'instruction_sets': {}, 'registered_tools': {'calculator': ...}, 'messages': []}
    """
56
+
57
+ def __init__(
58
+ self,
59
+ name: Optional[str] = None,
60
+ dir: Optional[str] = None,
61
+ messages: Optional[pd.DataFrame] = None,
62
+ instruction_sets: Optional[Dict[str, InstructionSet]] = None,
63
+ tool_manager: Optional[ToolManager] = None,
64
+ service: OpenAIService = oai_service,
65
+ llmconfig: Optional[Dict] = None,
66
+ ):
67
+ """
68
+ Initializes a new Branch instance.
69
+
70
+ Args:
71
+ dir (Optional[str]): The directory path for storing logs.
72
+ messages (Optional[pd.DataFrame]): A DataFrame containing conversation messages.
73
+ instruction_sets (Optional[Dict[str, InstructionSet]]): A dictionary of instruction sets.
74
+ tool_manager (Optional[ToolManager]): An instance of ToolManager for managing tools.
75
+ service (OpenAIService): The OpenAI service instance.
76
+ llmconfig (Optional[Dict]): Configuration for the language model.
77
+ """
78
+ super().__init__(dir)
79
+ self.messages = (
80
+ messages
81
+ if messages is not None
82
+ else pd.DataFrame(
83
+ columns=["node_id", "role", "sender", "timestamp", "content"]
84
+ )
85
+ )
86
+ self.instruction_sets = instruction_sets if instruction_sets else {}
87
+ self.tool_manager = tool_manager if tool_manager else ToolManager()
88
+
89
+ self.service = service if service else oai_service
90
+ self.status_tracker = StatusTracker()
91
+ if llmconfig:
92
+ self.llmconfig = llmconfig
93
+ else:
94
+ if isinstance(service, OpenAIService):
95
+ self.llmconfig = oai_schema["chat/completions"]["config"]
96
+ elif isinstance(service, OpenRouterService):
97
+ self.llmconfig = openrouter_schema["chat/completions"]["config"]
98
+ else:
99
+ self.llmconfig = {}
100
+
101
+ self.name = name
102
+ self.pending_ins = {}
103
+ self.pending_outs = deque()
104
+
105
+ def change_first_system_message(
106
+ self, system: Union[str, Dict[str, Any], System], sender: Optional[str] = None
107
+ ):
108
+ """
109
+ Change the system message of the conversation.
110
+
111
+ Args:
112
+ system (Union[str, Dict[str, Any], System]): The new system message.
113
+ sender (Optional[str]): The sender of the system message.
114
+
115
+ Raises:
116
+ ValueError: If the input cannot be converted into a system message.
117
+
118
+ Examples:
119
+ >>> branch.change_first_system_message("System update", sender="admin")
120
+ >>> branch.change_first_system_message({"text": "System reboot", "type": "update"})
121
+ """
122
+ if len(self.messages[self.messages.role == 'system']) == 0:
123
+ raise ValueError("There is no system message in the messages.")
124
+ if isinstance(system, (str, Dict)):
125
+ system = System(system, sender=sender)
126
+ if isinstance(system, System):
127
+ message_dict = system.to_dict()
128
+ if sender:
129
+ message_dict['sender'] = sender
130
+ message_dict['timestamp'] = str(pd.Timestamp.now())
131
+ sys_index = self.messages[self.messages.role == 'system'].index
132
+ self.messages.loc[sys_index[0]] = message_dict
133
+
134
+ else:
135
+ raise ValueError("Input cannot be converted into a system message.")
136
+
137
+ def register_tools(self, tools: Union[Tool, List[Tool]]):
138
+ """
139
+ Register one or more tools with the conversation's tool manager.
140
+
141
+ Args:
142
+ tools (Union[Tool, List[Tool]]): The tools to register.
143
+
144
+ Examples:
145
+ >>> tool = Tool(name="calculator")
146
+ >>> branch.register_tools(tool)
147
+ """
148
+ if not isinstance(tools, list):
149
+ tools = [tools]
150
+ self.tool_manager.register_tools(tools=tools)
151
+
152
+ def delete_tool(self, name: str) -> bool:
153
+ """
154
+ Delete a tool from the conversation's tool manager.
155
+
156
+ Args:
157
+ name (str): The name of the tool to delete.
158
+
159
+ Returns:
160
+ bool: True if the tool was deleted, False otherwise.
161
+
162
+ Examples:
163
+ >>> branch.delete_tool("calculator")
164
+ True
165
+ """
166
+ if name in self.tool_manager.registry:
167
+ self.tool_manager.registry.pop(name)
168
+ return True
169
+ return False
170
+
171
+ def clone(self) -> 'Branch':
172
+ """
173
+ Create a clone of the conversation.
174
+
175
+ Returns:
176
+ Branch: A new Branch object that is a clone of the current conversation.
177
+
178
+ Examples:
179
+ >>> cloned_branch = branch.clone()
180
+ """
181
+ cloned = Branch(
182
+ dir = self.logger.dir,
183
+ messages=self.messages.copy(),
184
+ instruction_sets=self.instruction_sets.copy(),
185
+ tool_manager=ToolManager()
186
+ )
187
+ tools = [
188
+ tool for tool in self.tool_manager.registry.values()]
189
+
190
+ cloned.register_tools(tools)
191
+
192
+ return cloned
193
+
194
+ def merge_branch(self, branch: 'Branch', update: bool = True):
195
+ """
196
+ Merge another Branch into this Branch.
197
+
198
+ Args:
199
+ branch (Branch): The Branch to merge into this one.
200
+ update (bool): If True, update existing instruction sets and tools,
201
+ otherwise only add non-existing ones.
202
+
203
+ """
204
+ message_copy = branch.messages.copy()
205
+ self.messages = self.messages.merge(message_copy, how='outer')
206
+
207
+ if update:
208
+ self.instruction_sets.update(branch.instruction_sets)
209
+ self.tool_manager.registry.update(
210
+ branch.tool_manager.registry
211
+ )
212
+ else:
213
+ for key, value in branch.instruction_sets.items():
214
+ if key not in self.instruction_sets:
215
+ self.instruction_sets[key] = value
216
+
217
+ for key, value in branch.tool_manager.registry.items():
218
+ if key not in self.tool_manager.registry:
219
+ self.tool_manager.registry[key] = value
220
+
221
+ @property
222
+ def messages_describe(self) -> Dict[str, Any]:
223
+ """
224
+ Describe the conversation and its messages.
225
+
226
+ Returns:
227
+ Dict[str, Any]: A dictionary containing information about the conversation and its messages.
228
+
229
+ Examples:
230
+ >>> description = branch.messages_describe()
231
+ >>> print(description["total_messages"])
232
+ 0
233
+ """
234
+ return {
235
+ "total_messages": len(self.messages),
236
+ "summary_by_role": self.info(),
237
+ "summary_by_sender": self.info(use_sender=True),
238
+ "instruction_sets": self.instruction_sets,
239
+ "registered_tools": self.tool_manager.registry,
240
+ "messages": [
241
+ msg.to_dict() for _, msg in self.messages.iterrows()
242
+ ],
243
+ }
244
+
245
+ def to_chatcompletion_message(self) -> List[Dict[str, Any]]:
246
+ """
247
+ Convert the conversation into a chat completion message format suitable for the OpenAI API.
248
+
249
+ Returns:
250
+ List[Dict[str, Any]]: A list of messages in chat completion message format.
251
+
252
+ Examples:
253
+ >>> chat_completion_message = branch.to_chatcompletion_message()
254
+ """
255
+ message = []
256
+ for _, row in self.messages.iterrows():
257
+ content_ = row['content']
258
+ if content_.startswith('Sender'):
259
+ content_ = content_.split(':', 1)[1]
260
+ if isinstance(content_, str):
261
+ try:
262
+ content_ = json.dumps(as_dict(content_))
263
+ except Exception as e:
264
+ raise ValueError(f"Error in serealizing, {row['node_id']} {content_}: {e}")
265
+
266
+ out = {"role": row['role'], "content": content_}
267
+ message.append(out)
268
+ return message
269
+
270
+ def _is_invoked(self) -> bool:
271
+ """
272
+ Check if the conversation has been invoked with an action response.
273
+
274
+ Returns:
275
+ bool: True if the conversation has been invoked, False otherwise.
276
+
277
+ """
278
+ content = self.messages.iloc[-1]['content']
279
+ try:
280
+ if (
281
+ as_dict(content)['action_response'].keys() >= {'function', 'arguments', 'output'}
282
+ ):
283
+ return True
284
+ except:
285
+ return False
286
+
287
+ async def call_chatcompletion(self, **kwargs):
288
+ """
289
+ Call the chat completion service with the current conversation messages.
290
+
291
+ This method asynchronously sends the messages to the OpenAI service and updates the conversation
292
+ with the response.
293
+
294
+ Args:
295
+ **kwargs: Additional keyword arguments to pass to the chat completion service.
296
+
297
+ """
298
+ messages = self.to_chatcompletion_message()
299
+ payload, completion = await self.service.serve_chat(messages=messages, **kwargs)
300
+ if "choices" in completion:
301
+ self.logger.add_entry({"input": payload, "output": completion})
302
+ self.add_message(response=completion['choices'][0])
303
+ self.status_tracker.num_tasks_succeeded += 1
304
+ else:
305
+ self.status_tracker.num_tasks_failed += 1
306
+
307
+ @property
308
+ def has_tools(self) -> bool:
309
+ """
310
+ Check if there are any tools registered in the tool manager.
311
+
312
+ Returns:
313
+ bool: True if there are tools registered, False otherwise.
314
+
315
+ """
316
+ return self.tool_manager.registry != {}
317
+
318
+ async def chat(
319
+ self,
320
+ instruction: Union[Instruction, str],
321
+ system: Optional[Union[System, str, Dict[str, Any]]] = None,
322
+ context: Optional[Any] = None,
323
+ out: bool = True,
324
+ sender: Optional[str] = None,
325
+ invoke: bool = True,
326
+ tools: Union[bool, Tool, List[Tool], str, List[str]] = False,
327
+ **kwargs
328
+ ) -> Any:
329
+ """
330
+ Conduct a chat with the conversation, processing instructions and potentially using tools.
331
+
332
+ This method asynchronously handles a chat instruction, updates the conversation with the response,
333
+ and performs tool invocations if specified.
334
+
335
+ Args:
336
+ instruction (Union[Instruction, str]): The chat instruction to process.
337
+ system (Optional[Union[System, str, Dict[str, Any]]]): The system message to include in the chat.
338
+ context (Optional[Any]): Additional context to include in the chat.
339
+ out (bool): If True, return the output of the chat.
340
+ sender (Optional[str]): The sender of the chat instruction.
341
+ invoke (bool): If True, invoke tools based on the chat response.
342
+ tools (Union[bool, Tool, List[Tool], str, List[str]]): Tools to potentially use during the chat.
343
+ **kwargs: Additional keyword arguments to pass to the chat completion service.
344
+
345
+ Returns:
346
+ Any: The output of the chat, if out is True.
347
+
348
+ Examples:
349
+ >>> result = await branch.chat("What is the weather today?")
350
+ >>> print(result)
351
+ """
352
+
353
+ if system:
354
+ self.change_first_system_message(system)
355
+ self.add_message(instruction=instruction, context=context, sender=sender)
356
+
357
+ if 'tool_parsed' in kwargs:
358
+ kwargs.pop('tool_parsed')
359
+ tool_kwarg = {'tools': tools}
360
+ kwargs = {**tool_kwarg, **kwargs}
361
+ else:
362
+ if tools and self.has_tools:
363
+ kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)
364
+
365
+ config = {**self.llmconfig, **kwargs}
366
+ await self.call_chatcompletion(**config)
367
+
368
+ async def _output():
369
+ content_ = as_dict(self.messages.content.iloc[-1])
370
+ if invoke:
371
+ try:
372
+ tool_uses = content_
373
+ func_calls = lcall(
374
+ [as_dict(i) for i in tool_uses["action_list"]],
375
+ self.tool_manager.get_function_call
376
+ )
377
+
378
+ # outs = await alcall(func_calls, self.tool_manager.invoke)
379
+
380
+ tasks = [self.tool_manager.invoke(i) for i in func_calls]
381
+ outs = await asyncio.gather(*tasks)
382
+ for out_, f in zip(outs, func_calls):
383
+ self.add_message(
384
+ response={
385
+ "function": f[0],
386
+ "arguments": f[1],
387
+ "output": out_
388
+ }
389
+ )
390
+ except:
391
+ pass
392
+ if out:
393
+ if (
394
+ len(content_.items()) == 1
395
+ and len(get_flattened_keys(content_)) == 1
396
+ ):
397
+ key = get_flattened_keys(content_)[0]
398
+ return content_[key]
399
+ return content_
400
+
401
+ return await _output()
402
+
403
+ async def auto_followup(
404
+ self,
405
+ instruction: Union[Instruction, str],
406
+ num: int = 3,
407
+ tools: Union[bool, Tool, List[Tool], str, List[str], List[Dict]] = False,
408
+ fallback: Optional[Callable] = None,
409
+ fallback_kwargs: Optional[Dict] = None,
410
+ **kwargs
411
+ ) -> None:
412
+ """
413
+ Automatically perform follow-up chats based on the conversation state.
414
+
415
+ This method asynchronously conducts follow-up chats based on the conversation state and tool invocations,
416
+ with an optional fallback if the maximum number of follow-ups is reached.
417
+
418
+ Args:
419
+ instruction (Union[Instruction, str]): The chat instruction to process.
420
+ num (int): The maximum number of follow-up chats to perform.
421
+ tools (Union[bool, Tool, List[Tool], str, List[str], List[Dict]]): Tools to potentially use during the chats.
422
+ fallback (Optional[Callable]): A fallback function to call if the maximum number of follow-ups is reached.
423
+ fallback_kwargs (Optional[Dict]): Keyword arguments to pass to the fallback function.
424
+ **kwargs: Additional keyword arguments to pass to the chat completion service.
425
+
426
+ Examples:
427
+ >>> await branch.auto_followup("Could you elaborate on that?")
428
+ """
429
+ if self.tool_manager.registry != {} and tools:
430
+ kwargs = self.tool_manager._tool_parser(tools=tools, **kwargs)
431
+
432
+ cont_ = True
433
+ while num > 0 and cont_ is True:
434
+ if tools:
435
+ await self.chat(instruction, tool_choice="auto", tool_parsed=True, out=False, **kwargs)
436
+ else:
437
+ await self.chat(instruction, tool_parsed=True, out=False, **kwargs)
438
+ num -= 1
439
+ cont_ = True if self._is_invoked() else False
440
+ if num == 0:
441
+ if fallback is not None:
442
+ if asyncio.iscoroutinefunction(fallback):
443
+ return await fallback(**fallback_kwargs)
444
+ else:
445
+ return fallback(**fallback_kwargs)
446
+ return await self.chat(instruction, tool_parsed=True, **kwargs)
447
+
448
+ def send(self, to_name, title, package):
449
+ """
450
+ Send a request package to a specified recipient.
451
+
452
+ Args:
453
+ to_name (str): The name of the recipient.
454
+ title (str): The title or category of the request (e.g., 'messages', 'tool', 'service', 'llmconfig').
455
+ package (Any): The actual data or object to be sent. Its expected type depends on the title.
456
+ """
457
+ request = Request(from_name=self.name, to_name=to_name, title=title, request=package)
458
+ self.pending_outs.append(request)
459
+
460
+ def receive(self, from_name, messages=True, tool=True, service=True, llmconfig=True):
461
+ """
462
+ Process and integrate received request packages based on their titles.
463
+
464
+ Args:
465
+ from_name (str): The name of the sender whose packages are to be processed.
466
+ messages (bool, optional): If True, processes 'messages' requests.
467
+ tool (bool, optional): If True, processes 'tool' requests.
468
+ service (bool, optional): If True, processes 'service' requests.
469
+ llmconfig (bool, optional): If True, processes 'llmconfig' requests.
470
+
471
+ Raises:
472
+ ValueError: If no package is found from the specified sender, or if any of the packages have an invalid format.
473
+ """
474
+ skipped_requests = deque()
475
+ if from_name not in self.pending_ins:
476
+ raise ValueError(f'No package from {from_name}')
477
+ while self.pending_ins[from_name]:
478
+ request = self.pending_ins[from_name].popleft()
479
+
480
+ if request.title == 'messages' and messages:
481
+ if not isinstance(request.request, pd.DataFrame):
482
+ raise ValueError('Invalid messages format')
483
+ validate_messages(request.request)
484
+ self.messages = self.messages.merge(request.request, how='outer')
485
+ continue
486
+
487
+ elif request.title == 'tool' and tool:
488
+ if not isinstance(request.request, Tool):
489
+ raise ValueError('Invalid tool format')
490
+ self.tool_manager.register_tools([request.request])
491
+
492
+ elif request.title == 'service' and service:
493
+ if not isinstance(request.request, BaseService):
494
+ raise ValueError('Invalid service format')
495
+ self.service = request.request
496
+
497
+ elif request.title == 'llmconfig' and llmconfig:
498
+ if not isinstance(request.request, dict):
499
+ raise ValueError('Invalid llmconfig format')
500
+ self.llmconfig.update(request.request)
501
+
502
+ else:
503
+ skipped_requests.append(request)
504
+
505
+ self.pending_ins[from_name] = skipped_requests
506
+
507
+ def receive_all(self):
508
+ """
509
+ Process all pending incoming requests from all senders.
510
+ """
511
+ for key in list(self.pending_ins.keys()):
512
+ self.receive(key)
513
+
514
+
515
+ # def add_instruction_set(self, name: str, instruction_set: InstructionSet):
516
+ # """
517
+ # Add an instruction set to the conversation.
518
+ #
519
+ # Args:
520
+ # name (str): The name of the instruction set.
521
+ # instruction_set (InstructionSet): The instruction set to add.
522
+ #
523
+ # Examples:
524
+ # >>> branch.add_instruction_set("greet", InstructionSet(instructions=["Hello", "Hi"]))
525
+ # """
526
+ # self.instruction_sets[name] = instruction_set
527
+
528
+ # def remove_instruction_set(self, name: str) -> bool:
529
+ # """
530
+ # Remove an instruction set from the conversation.
531
+ #
532
+ # Args:
533
+ # name (str): The name of the instruction set to remove.
534
+ #
535
+ # Returns:
536
+ # bool: True if the instruction set was removed, False otherwise.
537
+ #
538
+ # Examples:
539
+ # >>> branch.remove_instruction_set("greet")
540
+ # True
541
+ # """
542
+ # return self.instruction_sets.pop(name)
543
+
544
+ # async def instruction_set_auto_followup(
545
+ # self,
546
+ # instruction_set: InstructionSet,
547
+ # num: Union[int, List[int]] = 3,
548
+ # **kwargs
549
+ # ) -> None:
550
+ # """
551
+ # Automatically perform follow-up chats for an entire instruction set.
552
+ #
553
+ # This method asynchronously conducts follow-up chats for each instruction in the provided instruction set,
554
+ # handling tool invocations as specified.
555
+ #
556
+ # Args:
557
+ # instruction_set (InstructionSet): The instruction set to process.
558
+ # num (Union[int, List[int]]): The maximum number of follow-up chats to perform for each instruction,
559
+ # or a list of maximum numbers corresponding to each instruction.
560
+ # **kwargs: Additional keyword arguments to pass to the chat completion service.
561
+ #
562
+ # Raises:
563
+ # ValueError: If the length of `num` as a list does not match the number of instructions in the set.
564
+ #
565
+ # Examples:
566
+ # >>> instruction_set = InstructionSet(instructions=["What's the weather?", "And for tomorrow?"])
567
+ # >>> await branch.instruction_set_auto_followup(instruction_set)
568
+ # """
569
+ #
570
+ # if isinstance(num, List):
571
+ # if len(num) != instruction_set.instruct_len:
572
+ # raise ValueError(
573
+ # 'Unmatched auto_followup num size and instructions set size'
574
+ # )
575
+ # current_instruct_node = instruction_set.get_instruction_by_id(
576
+ # instruction_set.first_instruct
577
+ # )
578
+ # for i in range(instruction_set.instruct_len):
579
+ # num_ = num if isinstance(num, int) else num[i]
580
+ # tools = instruction_set.get_tools(current_instruct_node)
581
+ # if tools:
582
+ # await self.auto_followup(
583
+ # current_instruct_node, num=num_, tools=tools, self=self, **kwargs
584
+ # )
585
+ # else:
586
+ # await self.chat(current_instruct_node)
587
+ # current_instruct_node = instruction_set.get_next_instruction(
588
+ # current_instruct_node
589
+ # )