lionagi 0.0.104__tar.gz → 0.0.106__tar.gz

Files changed (32)
  1. {lionagi-0.0.104 → lionagi-0.0.106}/PKG-INFO +4 -3
  2. {lionagi-0.0.104 → lionagi-0.0.106}/README.md +3 -2
  3. lionagi-0.0.106/lionagi/session/conversation.py +96 -0
  4. lionagi-0.0.106/lionagi/session/message.py +142 -0
  5. lionagi-0.0.106/lionagi/session/session.py +309 -0
  6. lionagi-0.0.106/lionagi/tools/__init__.py +0 -0
  7. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/__init__.py +2 -1
  8. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/doc_util.py +38 -38
  9. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/sys_util.py +6 -2
  10. lionagi-0.0.106/lionagi/utils/tool_util.py +194 -0
  11. lionagi-0.0.106/lionagi/version.py +1 -0
  12. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi.egg-info/PKG-INFO +4 -3
  13. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi.egg-info/SOURCES.txt +3 -1
  14. lionagi-0.0.104/lionagi/session/conversation.py +0 -91
  15. lionagi-0.0.104/lionagi/session/message.py +0 -76
  16. lionagi-0.0.104/lionagi/session/session.py +0 -165
  17. lionagi-0.0.104/lionagi/version.py +0 -1
  18. {lionagi-0.0.104 → lionagi-0.0.106}/LICENSE +0 -0
  19. {lionagi-0.0.104 → lionagi-0.0.106}/README.rst +0 -0
  20. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/__init__.py +0 -0
  21. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/api/__init__.py +0 -0
  22. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/api/oai_config.py +0 -0
  23. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/api/oai_service.py +0 -0
  24. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/session/__init__.py +0 -0
  25. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/api_util.py +0 -0
  26. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/log_util.py +0 -0
  27. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi.egg-info/dependency_links.txt +0 -0
  28. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi.egg-info/requires.txt +0 -0
  29. {lionagi-0.0.104 → lionagi-0.0.106}/lionagi.egg-info/top_level.txt +0 -0
  30. {lionagi-0.0.104 → lionagi-0.0.106}/pyproject.toml +0 -0
  31. {lionagi-0.0.104 → lionagi-0.0.106}/setup.cfg +0 -0
  32. {lionagi-0.0.104 → lionagi-0.0.106}/setup.py +0 -0
{lionagi-0.0.104 → lionagi-0.0.106}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.0.104
+ Version: 0.0.106
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: Haiyang Li <ocean@lionagi.ai>

@@ -227,7 +227,8 @@ Requires-Dist: httpx==0.25.1
  - PyPI: https://pypi.org/project/lionagi/
  - Documentation: https://lionagi.readthedocs.io/en/latest/ (still a lot TODO)
  - Website: TODO
- - Discord: [Join Our Discord](https://discord.gg/ACnynvvPjt)
+ - Discord: [Join Our Discord](https://discord.gg/7RGWqpSxze)
+
 
  # LionAGI
  **Towards Automated General Intelligence**

@@ -286,7 +287,7 @@ Visit our notebooks for our examples.
 
  ### Community
 
- We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/ACnynvvPjt)
+ We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members: [Join Our Discord](https://discord.gg/7RGWqpSxze)
 
  ### Citation
 
{lionagi-0.0.104 → lionagi-0.0.106}/README.md

@@ -5,7 +5,8 @@
  - PyPI: https://pypi.org/project/lionagi/
  - Documentation: https://lionagi.readthedocs.io/en/latest/ (still a lot TODO)
  - Website: TODO
- - Discord: [Join Our Discord](https://discord.gg/ACnynvvPjt)
+ - Discord: [Join Our Discord](https://discord.gg/7RGWqpSxze)
+
 
  # LionAGI
  **Towards Automated General Intelligence**

@@ -64,7 +65,7 @@ Visit our notebooks for our examples.
 
  ### Community
 
- We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/ACnynvvPjt)
+ We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members: [Join Our Discord](https://discord.gg/7RGWqpSxze)
 
  ### Citation
 
lionagi-0.0.106/lionagi/session/conversation.py (new file)

@@ -0,0 +1,96 @@
+ from .message import Message
+
+ class Conversation:
+     """
+     A class representing a conversation between users and the assistant.
+
+     This class manages the exchange of messages within a conversation, including system settings,
+     user instructions, and assistant responses.
+
+     Attributes:
+         response_counts (int): The count of assistant responses in the conversation.
+         messages (list): A list to store messages in the conversation.
+         msg (Message): An instance of the Message class for creating messages.
+         responses (list): A list to store assistant responses in the conversation.
+
+     Methods:
+         initiate_conversation(system, instruction, context=None, name=None):
+             Initiate a conversation with a system setting and user instruction.
+
+         add_messages(system, instruction, context=None, response=None, name=None):
+             Add messages to the conversation, including system setting, user instruction, and assistant response.
+
+         change_system(system):
+             Change the system setting in the conversation.
+
+         keep_last_n_exchanges(n: int):
+             Keep the last n exchanges in the conversation.
+     """
+     response_counts = 0
+
+     def __init__(self, messages=None) -> None:
+         """
+         Initialize a Conversation object.
+
+         Parameters:
+             messages (list): A list of messages to initialize the conversation. Default is None.
+         """
+         self.messages = messages or []
+         self.msg = Message()
+         self.responses = []
+
+     def initiate_conversation(self, system=None, instruction=None, context=None, name=None):
+         """
+         Initiate a conversation with a system setting and user instruction.
+
+         Parameters:
+             system (str): The system setting for the conversation.
+             instruction (str): The user instruction to initiate the conversation.
+             context (dict): Additional context for the conversation. Default is None.
+             name (str): The name associated with the user. Default is None.
+         """
+         self.messages, self.responses = [], []
+         self.add_messages(system=system)
+         self.add_messages(instruction=instruction, context=context, name=name)
+
+     # modify the message adding to accommodate tools
+     def add_messages(self, system=None, instruction=None, context=None, response=None, name=None):
+         """
+         Add messages to the conversation, including system setting, user instruction, and assistant response.
+
+         Parameters:
+             system (str): The system setting for the message. Default is None.
+             instruction (str): The instruction content for the message. Default is None.
+             context (dict): Additional context for the message. Default is None.
+             response (dict): The response content for the message. Default is None.
+             name (str): The name associated with the message. Default is None.
+         """
+         msg = self.msg(system=system, instruction=instruction, context=context,
+                        response=response, name=name)
+         self.messages.append(msg)
+
+     def change_system(self, system):
+         """
+         Change the system setting in the conversation.
+
+         Parameters:
+             system (str): The new system setting for the conversation.
+         """
+         self.messages[0] = self.msg(system=system)
+
+     def keep_last_n_exchanges(self, n: int):
+         """
+         Keep the last n exchanges in the conversation.
+
+         Parameters:
+             n (int): The number of exchanges to keep.
+         """
+         # keep the last n exchanges; one exchange is marked by one assistant response
+         response_indices = [
+             index for index, message in enumerate(self.messages[1:]) if message["role"] == "assistant"
+         ]
+         if len(response_indices) >= n:
+             first_index_to_keep = response_indices[-n] + 1
+             self.messages = [self.messages[0]] + self.messages[first_index_to_keep:]
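To illustrate how the new Conversation class is meant to be driven, here is a minimal sketch. It assumes lionagi 0.0.106 is installed and importable as lionagi.session.conversation; the system and instruction strings are invented for the example.

from lionagi.session.conversation import Conversation

conv = Conversation()
conv.initiate_conversation(
    system="You are a helpful assistant.",        # hypothetical system setting
    instruction="Summarize the latest release.",  # hypothetical user instruction
)

# initiate_conversation() stores the system message first, then the user instruction;
# each entry is a {"role": ..., "content": ...} dict produced by Message.to_json()
for m in conv.messages:
    print(m["role"], m["content"])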
lionagi-0.0.106/lionagi/session/message.py (new file)

@@ -0,0 +1,142 @@
+ from datetime import datetime
+ import json
+ from ..utils.sys_util import create_id, l_call
+ from ..utils.log_util import DataLogger
+
+
+ class Message:
+     """
+     A class representing a message in a conversation.
+
+     This class encapsulates messages from users, the assistant, systems, and external tools.
+
+     Attributes:
+         role (str): The role of the message, indicating if it's from the user, assistant, system, or tool.
+         content: The content of the message, which can be an instruction, response, system setting, or tool information.
+         name (str): The name associated with the message, specifying the source (user, assistant, system, or tool).
+         metadata (dict): Additional metadata including id, timestamp, and name.
+         _logger (DataLogger): An instance of the DataLogger class for logging message details.
+
+     Methods:
+         create_message(system, instruction, context, response, name):
+             Create a message based on the provided information.
+
+         to_json() -> dict:
+             Convert the message to a JSON format.
+
+         __call__(system, instruction, context, response, name) -> dict:
+             Create and return a message in JSON format.
+
+         to_csv(dir, filename, verbose, timestamp, dir_exist_ok, file_exist_ok):
+             Save the message to a CSV file.
+     """
+     def __init__(self) -> None:
+         """
+         Initialize a Message object.
+         """
+         self.role = None
+         self.content = None
+         self.name = None
+         self.metadata = None
+         self._logger = DataLogger()
+
+     def create_message(self, system=None, instruction=None, context=None, response=None, name=None):
+         """
+         Create a message based on the provided information.
+
+         Parameters:
+             system (str): The system setting for the message. Default is None.
+             instruction (str): The instruction content for the message. Default is None.
+             context (dict): Additional context for the message. Default is None.
+             response (dict): The response content for the message. Default is None.
+             name (str): The name associated with the message. Default is None.
+         """
+         if sum(l_call([system, instruction, response], bool)) > 1:
+             raise ValueError("Error: Message cannot have more than one role.")
+
+         else:
+             if response:
+                 self.role = "assistant"
+                 try:
+                     response = response["message"]
+                     if str(response['content']) == "None":
+                         try:
+                             if response['tool_calls'][0]['type'] == 'function':
+                                 self.name = name or ("func_" + response['tool_calls'][0]['function']['name'])
+                                 content = response['tool_calls'][0]['function']['arguments']
+                                 self.content = {"function": self.name, "arguments": content}
+                         except:
+                             raise ValueError("Response message must be one of regular response or function calling")
+                     else:
+                         self.content = response['content']
+                         self.name = name or "assistant"
+                 except:
+                     self.name = name or "func_call"
+                     self.content = {"function call result": response}
+
+             elif instruction:
+                 self.role = "user"
+                 self.content = {"instruction": instruction}
+                 self.name = name or "user"
+                 if context:
+                     self.content.update({"context": context})
+             elif system:
+                 self.role = "system"
+                 self.content = system
+                 self.name = name or "system"
+
+     def to_json(self):
+         """
+         Convert the message to a JSON format.
+
+         Returns:
+             dict: The message in JSON format.
+         """
+         out = {
+             "role": self.role,
+             "content": json.dumps(self.content) if isinstance(self.content, dict) else self.content
+         }
+
+         self.metadata = {
+             "id": create_id(),
+             "timestamp": datetime.now().isoformat(),
+             "name": self.name}
+
+         self._logger({**self.metadata, **out})
+         return out
+
+     def __call__(self, system=None, instruction=None, context=None,
+                  response=None, name=None):
+         """
+         Create and return a message in JSON format.
+
+         Parameters:
+             system (str): The system setting for the message. Default is None.
+             instruction (str): The instruction content for the message. Default is None.
+             context (dict): Additional context for the message. Default is None.
+             response (dict): The response content for the message. Default is None.
+             name (str): The name associated with the message. Default is None.
+
+         Returns:
+             dict: The message in JSON format.
+         """
+         self.create_message(system=system, instruction=instruction,
+                             context=context, response=response, name=name)
+         return self.to_json()
+
+     def to_csv(self, dir=None, filename=None, verbose=True, timestamp=True, dir_exist_ok=True, file_exist_ok=False):
+         """
+         Save the message to a CSV file.
+
+         Parameters:
+             dir (str): The directory path for saving the CSV file. Default is None.
+             filename (str): The filename for the CSV file. Default is None.
+             verbose (bool): Whether to include verbose information in the CSV. Default is True.
+             timestamp (bool): Whether to include timestamps in the CSV. Default is True.
+             dir_exist_ok (bool): Whether to allow the directory to exist. Default is True.
+             file_exist_ok (bool): Whether to allow the file to exist. Default is False.
+         """
+         self._logger.to_csv(dir, filename, verbose, timestamp, dir_exist_ok, file_exist_ok)
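Message infers the role from whichever argument is supplied per call. A small sketch of that behavior; the import path follows the file layout above and the payloads are invented:

from lionagi.session.message import Message

msg = Message()

# at most one of system / instruction / response may be given per call
user = msg(instruction="Translate to French", context={"text": "hello"})
print(user)   # {"role": "user", "content": '{"instruction": ..., "context": ...}'}

# an OpenAI-style choice dict is treated as an assistant reply
assistant = msg(response={"message": {"content": "bonjour"}})
print(assistant)   # {"role": "assistant", "content": "bonjour"}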
lionagi-0.0.106/lionagi/session/session.py (new file)

@@ -0,0 +1,309 @@
+ import aiohttp
+ import asyncio
+ import json
+ from typing import Any
+
+ from .conversation import Conversation
+ from ..utils.sys_util import to_list
+ from ..utils.log_util import DataLogger
+ from ..utils.api_util import StatusTracker
+ from ..utils.tool_util import ToolManager
+ from ..api.oai_service import OpenAIService
+
+ from ..api.oai_config import oai_llmconfig
+
+
+ status_tracker = StatusTracker()
+ OAIService = OpenAIService()
+
+ class Session():
+     """
+     A class representing a conversation session with a conversational AI system.
+
+     This class manages the flow of conversation, system settings, and interactions with external tools.
+
+     Attributes:
+         conversation (Conversation): An instance of the Conversation class to manage messages.
+         system (str): The current system setting for the conversation.
+         llmconfig (dict): Configuration settings for the language model.
+         _logger (DataLogger): An instance of the DataLogger class for logging conversation details.
+         api_service: An instance of the API service for making calls to the conversational AI model.
+         toolmanager (ToolManager): An instance of the ToolManager class for managing external tools.
+
+     Methods:
+         set_dir(dir):
+             Set the directory for logging.
+
+         set_system(system):
+             Set the system for the conversation.
+
+         set_llmconfig(llmconfig):
+             Set the language model configuration.
+
+         set_api_service(api_service):
+             Set the API service for making model calls.
+
+         _output(invoke=True, out=True, tool_parser=None) -> Any:
+             Process the output, invoke tools if needed, and optionally return the output.
+
+         register_tools(tools, funcs, update=False, new=False, prefix=None, postfix=None):
+             Register tools and their corresponding functions.
+
+         initiate(instruction, system=None, context=None, name=None, invoke=True, out=True, tool_parser=None, **kwargs) -> Any:
+             Start a new conversation session with the provided instruction.
+
+         followup(instruction, system=None, context=None, out=True, name=None, invoke=True, tool_parser=None, **kwargs) -> Any:
+             Continue the conversation with the provided instruction.
+
+         create_payload_chatcompletion(**kwargs) -> dict:
+             Create a payload for chat completion based on the conversation state and configuration.
+
+         call_chatcompletion(sleep=0.1, **kwargs) -> None:
+             Make a call to the chat completion API and process the response.
+
+         messages_to_csv(dir=None, filename="_messages.csv", **kwargs) -> None:
+             Save conversation messages to a CSV file.
+
+         log_to_csv(dir=None, filename="_llmlog.csv", **kwargs) -> None:
+             Save conversation logs to a CSV file.
+     """
+
+     def __init__(self, system, dir=None, llmconfig=oai_llmconfig, api_service=OAIService):
+         """
+         Initialize a Session object with default or provided settings.
+
+         Parameters:
+             system (str): The initial system setting for the conversation.
+             dir (Optional[str]): The directory for logging. Default is None.
+             llmconfig (Optional[dict]): Configuration settings for the language model. Default is oai_llmconfig.
+             api_service: An instance of the API service for making calls to the conversational AI model.
+         """
+         self.conversation = Conversation()
+         self.system = system
+         self.llmconfig = llmconfig
+         self._logger = DataLogger(dir=dir)
+         self.api_service = api_service
+         self.toolmanager = ToolManager()
+
+     def set_dir(self, dir):
+         """
+         Set the directory for logging.
+
+         Parameters:
+             dir (str): The directory path.
+         """
+         self._logger.dir = dir
+
+     def set_system(self, system):
+         """
+         Set the system for the conversation.
+
+         Parameters:
+             system (str): The system setting.
+         """
+         self.conversation.change_system(system)
+
+     def set_llmconfig(self, llmconfig):
+         """
+         Set the language model configuration.
+
+         Parameters:
+             llmconfig (dict): Configuration settings for the language model.
+         """
+         self.llmconfig = llmconfig
+
+     def set_api_service(self, api_service):
+         """
+         Set the API service for making model calls.
+
+         Parameters:
+             api_service: An instance of the API service.
+         """
+         self.api_service = api_service
+
+     async def _output(self, invoke=True, out=True, tool_parser=None):
+         """
+         Process the latest response, invoke tools if needed, and optionally return the output.
+
+         Parameters:
+             invoke (bool): Whether to invoke tools based on the latest response. Default is True.
+             out (bool): Whether to return the output. Default is True.
+             tool_parser (callable): An optional parser applied to tool outputs. Default is None.
+
+         Returns:
+             Any: The processed output.
+         """
+         if invoke:
+             try:
+                 func, args = self.toolmanager._get_function_call(self.conversation.responses[-1]['content'])
+                 outs = await self.toolmanager.ainvoke(func, args)
+                 outs = tool_parser(outs) if tool_parser else outs
+                 self.conversation.add_messages(response=outs)
+             except:
+                 pass
+         if out:
+             return self.conversation.responses[-1]['content']
+
+     def register_tools(self, tools, funcs, update=False, new=False, prefix=None, postfix=None):
+         """
+         Register tools and their corresponding functions.
+
+         Parameters:
+             tools (list): The list of tool information dictionaries.
+             funcs (list): The list of corresponding functions.
+             update (bool): Whether to update existing functions.
+             new (bool): Whether to create new registries for existing functions.
+             prefix (Optional[str]): A prefix to add to the function names.
+             postfix (Optional[str]): A postfix to add to the function names.
+         """
+         funcs = to_list(funcs)
+         self.toolmanager.register_tools(tools, funcs, update, new, prefix, postfix)
+
+     async def initiate(self, instruction, system=None, context=None, name=None, invoke=True, out=True, tool_parser=None, **kwargs) -> Any:
+         """
+         Start a new conversation session with the provided instruction.
+
+         Parameters:
+             instruction (str): The instruction to initiate the conversation.
+             system (Optional[str]): The system setting for the conversation. Default is None.
+             context (Optional[dict]): Additional context for the instruction. Default is None.
+             name (Optional[str]): The name associated with the instruction. Default is None.
+             invoke (bool): Whether to invoke tools based on the output. Default is True.
+             out (bool): Whether to return the output. Default is True.
+             tool_parser (callable): An optional parser applied to tool outputs. Default is None.
+             kwargs: Additional keyword arguments for configuration.
+
+         Returns:
+             Any: The processed output.
+         """
+         config = {**self.llmconfig, **kwargs}
+         system = system or self.system
+         self.conversation.initiate_conversation(system=system, instruction=instruction, context=context, name=name)
+         await self.call_chatcompletion(**config)
+
+         return await self._output(invoke, out, tool_parser)
+
+     async def followup(self, instruction, system=None, context=None, out=True, name=None, invoke=True, tool_parser=None, **kwargs) -> Any:
+         """
+         Continue the conversation with the provided instruction.
+
+         Parameters:
+             instruction (str): The instruction to continue the conversation.
+             system (Optional[str]): The system setting for the conversation. Default is None.
+             context (Optional[dict]): Additional context for the instruction. Default is None.
+             out (bool): Whether to return the output. Default is True.
+             name (Optional[str]): The name associated with the instruction. Default is None.
+             invoke (bool): Whether to invoke tools based on the output. Default is True.
+             tool_parser (callable): An optional parser applied to tool outputs. Default is None.
+             kwargs: Additional keyword arguments for configuration.
+
+         Returns:
+             Any: The processed output.
+         """
+         if system:
+             self.conversation.change_system(system)
+         self.conversation.add_messages(instruction=instruction, context=context, name=name)
+         config = {**self.llmconfig, **kwargs}
+         await self.call_chatcompletion(**config)
+
+         return await self._output(invoke, out, tool_parser)
+
+     def create_payload_chatcompletion(self, **kwargs):
+         """
+         Create a payload for chat completion based on the conversation state and configuration.
+
+         Parameters:
+             kwargs: Additional keyword arguments for configuration.
+
+         Returns:
+             dict: The payload for chat completion.
+         """
+         # currently only openai chat completions are supported
+         messages = self.conversation.messages
+         config = {**self.llmconfig, **kwargs}
+         payload = {
+             "messages": messages,
+             "model": config.get('model'),
+             "frequency_penalty": config.get('frequency_penalty'),
+             "n": config.get('n'),
+             "presence_penalty": config.get('presence_penalty'),
+             "response_format": config.get('response_format'),
+             "temperature": config.get('temperature'),
+             "top_p": config.get('top_p'),
+         }
+
+         for key in ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"]:
+             if bool(config[key]) is True and str(config[key]) != "none":
+                 payload.update({key: config[key]})
+         return payload
+
+     async def call_chatcompletion(self, sleep=0.1, **kwargs):
+         """
+         Make a call to the chat completion API and process the response.
+
+         Parameters:
+             sleep (float): The sleep duration after making the API call. Default is 0.1.
+             kwargs: Additional keyword arguments for configuration.
+         """
+         endpoint = "chat/completions"
+         try:
+             async with aiohttp.ClientSession() as session:
+                 payload = self.create_payload_chatcompletion(**kwargs)
+                 completion = await self.api_service.call_api(
+                     session, endpoint, payload)
+                 if "choices" in completion:
+                     self._logger({"input": payload, "output": completion})
+                     self.conversation.add_messages(response=completion['choices'][0])
+                     self.conversation.responses.append(self.conversation.messages[-1])
+                     self.conversation.response_counts += 1
+                     await asyncio.sleep(sleep)
+                     status_tracker.num_tasks_succeeded += 1
+                 else:
+                     status_tracker.num_tasks_failed += 1
+         except Exception as e:
+             status_tracker.num_tasks_failed += 1
+             raise e
+
+     def messages_to_csv(self, dir=None, filename="_messages.csv", **kwargs):
+         """
+         Save conversation messages to a CSV file.
+
+         Parameters:
+             dir (Optional[str]): The directory path for saving the CSV file. Default is None.
+             filename (Optional[str]): The filename for the CSV file. Default is "_messages.csv".
+             kwargs: Additional keyword arguments for CSV file settings.
+         """
+         dir = dir or self._logger.dir
+         if dir is None:
+             raise ValueError("No directory specified.")
+         self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)
+
+     def log_to_csv(self, dir=None, filename="_llmlog.csv", **kwargs):
+         """
+         Save conversation logs to a CSV file.
+
+         Parameters:
+             dir (Optional[str]): The directory path for saving the CSV file. Default is None.
+             filename (Optional[str]): The filename for the CSV file. Default is "_llmlog.csv".
+             kwargs: Additional keyword arguments for CSV file settings.
+         """
+         dir = dir or self._logger.dir
+         if dir is None:
+             raise ValueError("No directory specified.")
+         self._logger.to_csv(dir=dir, filename=filename, **kwargs)
+
+     def is_invoked(self):
+         # check whether the latest message holds a tool invocation result
+         msg = self.conversation.messages[-1]
+         try:
+             if "function call result" in json.loads(msg['content']).keys():
+                 return True
+         except:
+             return False
+
+     async def auto_followup(self, instruct, num=3, tool_parser=None, **kwargs):
+         # keep following up while tools are being invoked, for at most num rounds
+         cont_ = True
+         while num > 0 and cont_ is True:
+             await self.followup(instruct, tool_parser=tool_parser, tool_choice="auto", **kwargs)
+             num -= 1
+             cont_ = True if self.is_invoked() else False
+         if num == 0:
+             await self.followup(instruct, **kwargs)
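Session is the high-level entry point added in this release. A minimal usage sketch, assuming the bundled OpenAIService picks up an OpenAI API key from the environment; the instructions and log directory are placeholders:

import asyncio
from lionagi.session.session import Session

async def main():
    session = Session(system="You are a concise research assistant.", dir="logs/")

    # initiate() builds the payload from llmconfig, calls chat/completions,
    # and returns the assistant's reply content
    first = await session.initiate("Give me three uses of the lionagi Session API.")
    print(first)

    # followup() appends to the same conversation
    second = await session.followup("Condense that into one sentence.")
    print(second)

    # both CSV helpers write under the directory passed above
    session.messages_to_csv()
    session.log_to_csv()

asyncio.run(main())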
lionagi-0.0.106/lionagi/tools/__init__.py: file without changes
{lionagi-0.0.104 → lionagi-0.0.106}/lionagi/utils/__init__.py

@@ -1,9 +1,10 @@
  from .sys_util import to_flat_dict, append_to_jsonl, to_list, str_to_num, make_copy, to_temp, to_csv, hold_call, ahold_call, l_call, al_call, m_call, am_call, e_call, ae_call, get_timestamp, create_path
  from .doc_util import dir_to_path, read_text, dir_to_files, chunk_text, file_to_chunks, file_to_chunks, get_bins
  from .log_util import DataLogger
+ from .tool_util import ToolManager
 
  __all__ = [
      "to_list", "str_to_num", "make_copy", "to_temp", "to_csv", "hold_call", "ahold_call", "l_call", "al_call", "m_call", "am_call", "e_call", "ae_call", "get_timestamp", "create_path", "to_flat_dict", "append_to_jsonl",
      "dir_to_path", "read_text", "dir_to_files", "chunk_text", "file_to_chunks", "file_to_chunks", "get_bins",
- "DataLogger"
+ "DataLogger", "ToolManager"
  ]
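With tool_util now exported from lionagi.utils, tools can be registered on a ToolManager directly or via Session.register_tools. A sketch under stated assumptions: the schema below is a hypothetical OpenAI-style function definition, and only the import path and the positional register_tools call (the same one Session makes internally) come from this diff.

from lionagi.utils import ToolManager

def add_numbers(x: int, y: int) -> int:
    """Add two integers."""
    return x + y

# hypothetical OpenAI-style tool schema describing the function above
tool_schema = {
    "type": "function",
    "function": {
        "name": "add_numbers",
        "description": "Add two integers.",
        "parameters": {
            "type": "object",
            "properties": {"x": {"type": "integer"}, "y": {"type": "integer"}},
            "required": ["x", "y"],
        },
    },
}

manager = ToolManager()
# positional arguments mirror Session.register_tools:
# (tools, funcs, update, new, prefix, postfix)
manager.register_tools([tool_schema], [add_numbers], False, False, None, None)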