lionagi 0.0.104__tar.gz → 0.0.105__tar.gz

Sign up to get free protection for your applications and to get access to all the features.
Files changed (32) hide show
  1. {lionagi-0.0.104 → lionagi-0.0.105}/PKG-INFO +4 -3
  2. {lionagi-0.0.104 → lionagi-0.0.105}/README.md +3 -2
  3. lionagi-0.0.105/lionagi/session/conversation.py +95 -0
  4. lionagi-0.0.105/lionagi/session/message.py +139 -0
  5. lionagi-0.0.105/lionagi/session/session.py +291 -0
  6. lionagi-0.0.105/lionagi/tools/__init__.py +0 -0
  7. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/utils/__init__.py +2 -1
  8. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/utils/doc_util.py +38 -38
  9. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/utils/sys_util.py +6 -2
  10. lionagi-0.0.105/lionagi/utils/tool_util.py +194 -0
  11. lionagi-0.0.105/lionagi/version.py +1 -0
  12. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi.egg-info/PKG-INFO +4 -3
  13. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi.egg-info/SOURCES.txt +3 -1
  14. lionagi-0.0.104/lionagi/session/conversation.py +0 -91
  15. lionagi-0.0.104/lionagi/session/message.py +0 -76
  16. lionagi-0.0.104/lionagi/session/session.py +0 -165
  17. lionagi-0.0.104/lionagi/version.py +0 -1
  18. {lionagi-0.0.104 → lionagi-0.0.105}/LICENSE +0 -0
  19. {lionagi-0.0.104 → lionagi-0.0.105}/README.rst +0 -0
  20. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/__init__.py +0 -0
  21. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/api/__init__.py +0 -0
  22. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/api/oai_config.py +0 -0
  23. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/api/oai_service.py +0 -0
  24. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/session/__init__.py +0 -0
  25. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/utils/api_util.py +0 -0
  26. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi/utils/log_util.py +0 -0
  27. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi.egg-info/dependency_links.txt +0 -0
  28. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi.egg-info/requires.txt +0 -0
  29. {lionagi-0.0.104 → lionagi-0.0.105}/lionagi.egg-info/top_level.txt +0 -0
  30. {lionagi-0.0.104 → lionagi-0.0.105}/pyproject.toml +0 -0
  31. {lionagi-0.0.104 → lionagi-0.0.105}/setup.cfg +0 -0
  32. {lionagi-0.0.104 → lionagi-0.0.105}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lionagi
3
- Version: 0.0.104
3
+ Version: 0.0.105
4
4
  Summary: Towards automated general intelligence.
5
5
  Author: HaiyangLi
6
6
  Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -227,7 +227,8 @@ Requires-Dist: httpx==0.25.1
227
227
  - PyPI: https://pypi.org/project/lionagi/
228
228
  - Documentation: https://lionagi.readthedocs.io/en/latest/ (still a lot TODO)
229
229
  - Website: TODO
230
- - Discord: [Join Our Discord](https://discord.gg/ACnynvvPjt)
230
+ - Discord: [Join Our Discord](https://discord.gg/7RGWqpSxze)
231
+
231
232
 
232
233
  # LionAGI
233
234
  **Towards Automated General Intelligence**
@@ -286,7 +287,7 @@ Visit our notebooks for our examples.
286
287
 
287
288
  ### Community
288
289
 
289
- We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/ACnynvvPjt)
290
+ We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/7RGWqpSxze)
290
291
 
291
292
  ### Citation
292
293
 
@@ -5,7 +5,8 @@
5
5
  - PyPI: https://pypi.org/project/lionagi/
6
6
  - Documentation: https://lionagi.readthedocs.io/en/latest/ (still a lot TODO)
7
7
  - Website: TODO
8
- - Discord: [Join Our Discord](https://discord.gg/ACnynvvPjt)
8
+ - Discord: [Join Our Discord](https://discord.gg/7RGWqpSxze)
9
+
9
10
 
10
11
  # LionAGI
11
12
  **Towards Automated General Intelligence**
@@ -64,7 +65,7 @@ Visit our notebooks for our examples.
64
65
 
65
66
  ### Community
66
67
 
67
- We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/ACnynvvPjt)
68
+ We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/7RGWqpSxze)
68
69
 
69
70
  ### Citation
70
71
 
@@ -0,0 +1,95 @@
1
from .message import Message


class Conversation:
    """
    A conversation between users and the assistant.

    Manages the exchange of messages within a conversation, including the
    system setting, user instructions, and assistant responses.

    Attributes:
        response_counts (int): Count of assistant responses. NOTE(review):
            this is a class-level attribute, so it is shared across all
            Conversation instances — confirm that is intended.
        messages (list): Messages in the conversation, in order; the system
            message is expected at index 0.
        msg (Message): A Message factory used to build message dicts.
        responses (list): Assistant responses accumulated in the conversation.
    """

    response_counts = 0

    def __init__(self, messages=None) -> None:
        """
        Initialize a Conversation object.

        Parameters:
            messages (list): Messages to seed the conversation with.
                Default is None (starts empty).
        """
        self.messages = messages or []
        self.msg = Message()
        self.responses = []

    def initiate_conversation(self, system, instruction, context=None, name=None):
        """
        Start a fresh conversation with a system setting and user instruction.

        Any previously stored messages and responses are discarded.

        Parameters:
            system (str): The system setting for the conversation.
            instruction (str): The user instruction to initiate the conversation.
            context (dict): Additional context for the instruction. Default is None.
            name (str): The name associated with the user. Default is None.
        """
        self.messages, self.responses = [], []
        self.add_messages(system=system)
        self.add_messages(instruction=instruction, context=context, name=name)

    # TODO: extend message adding to better accommodate tools
    def add_messages(self, system=None, instruction=None, context=None,
                     response=None, tool=None, name=None):
        """
        Append one message (system, instruction, response, or tool) to the
        conversation.

        Parameters:
            system (str): The system setting for the message. Default is None.
            instruction (str): The instruction content. Default is None.
            context (dict): Additional context for the message. Default is None.
            response (dict): The response content. Default is None.
            tool (dict): The tool information for the message. Default is None.
            name (str): The name associated with the message. Default is None.
        """
        msg = self.msg(system=system, instruction=instruction, context=context,
                       response=response, tool=tool, name=name)
        self.messages.append(msg)

    def change_system(self, system):
        """
        Replace the system setting (the first message) in the conversation.

        Parameters:
            system (str): The new system setting for the conversation.
        """
        self.messages[0] = self.msg(system=system)

    def keep_last_n_exchanges(self, n: int):
        """
        Keep only the last n exchanges in the conversation.

        One exchange is marked by one assistant response; the system message
        (first message) is always preserved.

        Parameters:
            n (int): The number of exchanges to keep.
        """
        # Indices are relative to messages[1:] (the system message is skipped).
        response_indices = [
            index for index, message in enumerate(self.messages[1:])
            if message["role"] == "assistant"
        ]
        if len(response_indices) >= n:
            first_index_to_keep = response_indices[-n] + 1
            # Bug fix: the original referenced the non-existent attribute
            # `self.system`, raising AttributeError; the system message
            # lives at self.messages[0].
            self.messages = [self.messages[0]] + self.messages[first_index_to_keep:]
@@ -0,0 +1,139 @@
1
from datetime import datetime
import json
from ..utils.sys_util import create_id, l_call
from ..utils.log_util import DataLogger


class Message:
    """
    A message in a conversation.

    Encapsulates messages from users, the assistant, systems, and external
    tools, and logs each produced message through a DataLogger.

    Attributes:
        role (str): Role of the message: "user", "assistant", "system", or "tool".
        content: Message content — an instruction dict, response content,
            system setting, or tool information.
        name (str): Source name (user, assistant, system, tool, or "func_<name>"
            for function calls).
        metadata (dict): Metadata (id, timestamp, name) set by to_json().
        _logger (DataLogger): Logger recording each serialized message.
    """

    def __init__(self) -> None:
        """Initialize an empty Message object."""
        self.role = None
        self.content = None
        self.name = None
        self.metadata = None
        self._logger = DataLogger()

    def create_message(self, system=None, instruction=None, context=None,
                       response=None, tool=None, name=None):
        """
        Populate role/content/name from exactly one of system, instruction,
        response, or tool.

        Parameters:
            system (str): The system setting for the message. Default is None.
            instruction (str): The instruction content. Default is None.
            context (dict): Extra context merged into an instruction message.
            response (dict): An API response choice (expects a "message" key).
            tool (dict): Tool output for a "tool" role message. Default is None.
            name (str): Name associated with the message. Default is None.

        Raises:
            ValueError: If more than one role source is provided, or a
                response is neither regular content nor a function call.
        """
        # Exactly one of the four role sources may be truthy.
        if sum(l_call([system, instruction, response, tool], bool)) > 1:
            raise ValueError("Error: Message cannot have more than one role.")

        if response:
            self.role = "assistant"
            response = response["message"]
            # A null content marks a tool/function-call response.
            if str(response['content']) == "None":
                # Bug fix: narrowed from a bare `except:` so SystemExit /
                # KeyboardInterrupt are not swallowed, and the original
                # lookup failure is chained for debuggability.
                try:
                    # currently can only support a single function response
                    if response['tool_calls'][0]['type'] == 'function':
                        self.name = name or ("func_" + response['tool_calls'][0]['function']['name'])
                        content = response['tool_calls'][0]['function']['arguments']
                        self.content = {"function": self.name, "arguments": content}
                except (KeyError, IndexError, TypeError) as e:
                    raise ValueError("Response message must be one of regular response or function calling") from e
            else:
                self.content = response['content']
                self.name = name or "assistant"
        elif instruction:
            self.role = "user"
            self.content = {"instruction": instruction}
            self.name = name or "user"
            if context:
                self.content.update({"context": context})
        elif system:
            self.role = "system"
            self.content = system
            self.name = name or "system"
        elif tool:
            self.role = "tool"
            self.content = tool
            self.name = name or "tool"

    def to_json(self):
        """
        Convert the message to a JSON-serializable dict and log it.

        Returns:
            dict: {"role": ..., "content": ...}; dict content is serialized
            with json.dumps.
        """
        out = {
            "role": self.role,
            "content": json.dumps(self.content) if isinstance(self.content, dict) else self.content
        }

        self.metadata = {
            "id": create_id(),
            "timestamp": datetime.now().isoformat(),
            "name": self.name}

        # Log metadata plus the serialized message for later CSV export.
        self._logger({**self.metadata, **out})
        return out

    def __call__(self, system=None, instruction=None, context=None,
                 response=None, name=None, tool=None):
        """
        Create and return a message in JSON format.

        Parameters mirror create_message(); see its docstring.

        Returns:
            dict: The message in JSON format.
        """
        self.create_message(system, instruction, context, response, tool, name)
        return self.to_json()

    def to_csv(self, dir=None, filename=None, verbose=True, timestamp=True,
               dir_exist_ok=True, file_exist_ok=False):
        """
        Save logged messages to a CSV file via the DataLogger.

        Parameters:
            dir (str): Directory for the CSV file. Default is None.
            filename (str): Filename for the CSV file. Default is None.
            verbose (bool): Whether to print verbose output. Default is True.
            timestamp (bool): Whether to timestamp the filename. Default is True.
            dir_exist_ok (bool): Allow the directory to exist. Default is True.
            file_exist_ok (bool): Allow the file to exist. Default is False.
        """
        self._logger.to_csv(dir, filename, verbose, timestamp, dir_exist_ok, file_exist_ok)
@@ -0,0 +1,291 @@
1
import aiohttp
import asyncio
from typing import Any

from .conversation import Conversation
from ..utils.sys_util import to_list
from ..utils.log_util import DataLogger
from ..utils.api_util import StatusTracker
from ..utils.tool_util import ToolManager
from ..api.oai_service import OpenAIService

from ..api.oai_config import oai_llmconfig


status_tracker = StatusTracker()
OAIService = OpenAIService()


class Session:
    """
    A conversation session with a conversational AI system.

    Manages the flow of conversation, system settings, and interactions
    with external tools.

    Attributes:
        conversation (Conversation): Manages the session's messages.
        system (str): The current system setting for the conversation.
        llmconfig (dict): Configuration settings for the language model.
        _logger (DataLogger): Logs request/response payloads.
        api_service: API service used to call the conversational AI model.
        toolmanager (ToolManager): Manages registered external tools.
    """

    def __init__(self, system, dir=None, llmconfig=oai_llmconfig, api_service=OAIService):
        """
        Initialize a Session object with default or provided settings.

        Parameters:
            system (str): The initial system setting for the conversation.
            dir (Optional[str]): The directory for logging. Default is None.
            llmconfig (Optional[dict]): Language model configuration.
                Default is oai_llmconfig.
            api_service: API service for model calls. Default is the
                module-level OpenAIService instance.
        """
        self.conversation = Conversation()
        self.system = system
        self.llmconfig = llmconfig
        self._logger = DataLogger(dir=dir)
        self.api_service = api_service
        self.toolmanager = ToolManager()

    def set_dir(self, dir):
        """
        Set the directory for logging.

        Parameters:
            dir (str): The directory path.
        """
        self._logger.dir = dir

    def set_system(self, system):
        """
        Set the system for the conversation.

        Parameters:
            system (str): The system setting.
        """
        self.conversation.change_system(system)

    def set_llmconfig(self, llmconfig):
        """
        Set the language model configuration.

        Parameters:
            llmconfig (dict): Configuration settings for the language model.
        """
        self.llmconfig = llmconfig

    def set_api_service(self, api_service):
        """
        Set the API service for making model calls.

        Parameters:
            api_service: An instance of the API service.
        """
        self.api_service = api_service

    async def _output(self, output, invoke=True, out=True):
        """
        Process model output, invoking a tool when the output is a function
        call, and optionally return the output.

        Parameters:
            output: The model output to process.
            invoke (bool): Whether to attempt tool invocation. Default is True.
            out (bool): Whether to return the output. Default is True.

        Returns:
            Any: The output when out is True, otherwise None.
        """
        if invoke:
            # Best-effort: a regular (non-function-call) response simply
            # fails to parse and is not invoked. Narrowed from a bare
            # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
            try:
                func, args = self.toolmanager._get_function_call(output)
                outs = await self.toolmanager.ainvoke(func, args)
                self.conversation.add_messages(tool=outs)
            except Exception:
                pass
        if out:
            return output

    def register_tools(self, tools, funcs, update=False, new=False, prefix=None, postfix=None):
        """
        Register tools and their corresponding functions.

        Parameters:
            tools (list): The list of tool information dictionaries.
            funcs (list): The list of corresponding functions.
            update (bool): Whether to update existing functions.
            new (bool): Whether to create new registries for existing functions.
            prefix (Optional[str]): A prefix to add to the function names.
            postfix (Optional[str]): A postfix to add to the function names.
        """
        funcs = to_list(funcs)
        self.toolmanager.register_tools(tools, funcs, update, new, prefix, postfix)

    async def initiate(self, instruction, system=None, context=None, out=True,
                       name=None, invoke=True, **kwargs) -> Any:
        """
        Start a new conversation session with the provided instruction.

        Parameters:
            instruction (str): The instruction to initiate the conversation.
            system (Optional[str]): System setting; falls back to self.system.
            context (Optional[dict]): Additional context. Default is None.
            out (bool): Whether to return the output. Default is True.
            name (Optional[str]): Name associated with the instruction.
            invoke (bool): Whether to invoke tools based on the output.
            kwargs: Extra configuration overriding self.llmconfig.

        Returns:
            Any: The processed output.
        """
        config = {**self.llmconfig, **kwargs}
        system = system or self.system
        self.conversation.initiate_conversation(system=system, instruction=instruction,
                                                context=context, name=name)
        await self.call_chatcompletion(**config)
        output = self.conversation.responses[-1]['content']

        return await self._output(output, invoke, out)

    async def followup(self, instruction, system=None, context=None, out=True,
                       name=None, invoke=True, **kwargs) -> Any:
        """
        Continue the conversation with the provided instruction.

        Parameters:
            instruction (str): The instruction to continue the conversation.
            system (Optional[str]): New system setting, if changing it.
            context (Optional[dict]): Additional context. Default is None.
            out (bool): Whether to return the output. Default is True.
            name (Optional[str]): Name associated with the instruction.
            invoke (bool): Whether to invoke tools based on the output.
            kwargs: Extra configuration overriding self.llmconfig.

        Returns:
            Any: The processed output.
        """
        if system:
            self.conversation.change_system(system)
        self.conversation.add_messages(instruction=instruction, context=context, name=name)
        config = {**self.llmconfig, **kwargs}
        await self.call_chatcompletion(**config)
        output = self.conversation.responses[-1]['content']

        return await self._output(output, invoke, out)

    def create_payload_chatcompletion(self, **kwargs):
        """
        Create a payload for chat completion from the conversation state
        and configuration.

        Parameters:
            kwargs: Extra configuration overriding self.llmconfig.

        Returns:
            dict: The payload for chat completion.
        """
        # currently only openai chat completions are supported
        messages = self.conversation.messages
        config = {**self.llmconfig, **kwargs}
        payload = {
            "messages": messages,
            "model": config.get('model'),
            "frequency_penalty": config.get('frequency_penalty'),
            "n": config.get('n'),
            "presence_penalty": config.get('presence_penalty'),
            "response_format": config.get('response_format'),
            "temperature": config.get('temperature'),
            "top_p": config.get('top_p'),
        }

        for key in ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"]:
            # Bug fix: use .get() — config[key] raised KeyError when an
            # optional key was absent from the merged configuration.
            value = config.get(key)
            if value and str(value) != "none":
                payload.update({key: value})
        return payload

    async def call_chatcompletion(self, sleep=0.1, **kwargs):
        """
        Call the chat completion API and record the response.

        Parameters:
            sleep (float): Sleep duration after the API call. Default is 0.1.
            kwargs: Extra configuration for the payload.

        Raises:
            Exception: Re-raises any error from the API call after updating
                the status tracker.
        """
        endpoint = "chat/completions"
        try:
            async with aiohttp.ClientSession() as session:
                payload = self.create_payload_chatcompletion(**kwargs)
                completion = await self.api_service.call_api(
                    session, endpoint, payload)
                if "choices" in completion:
                    self._logger({"input": payload, "output": completion})
                    self.conversation.add_messages(response=completion['choices'][0])
                    self.conversation.responses.append(self.conversation.messages[-1])
                    self.conversation.response_counts += 1
                    await asyncio.sleep(sleep)
                    status_tracker.num_tasks_succeeded += 1
                else:
                    status_tracker.num_tasks_failed += 1
        except Exception as e:
            status_tracker.num_tasks_failed += 1
            raise e

    def messages_to_csv(self, dir=None, filename="_messages.csv", **kwargs):
        """
        Save conversation messages to a CSV file.

        Parameters:
            dir (Optional[str]): Directory for the CSV file. Defaults to the
                logger's directory.
            filename (Optional[str]): Filename. Default is "_messages.csv".
            kwargs: Additional keyword arguments for CSV file settings.

        Raises:
            ValueError: If no directory is specified here or on the logger.
        """
        dir = dir or self._logger.dir
        if dir is None:
            raise ValueError("No directory specified.")
        self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)

    def log_to_csv(self, dir=None, filename="_llmlog.csv", **kwargs):
        """
        Save conversation logs to a CSV file.

        Parameters:
            dir (Optional[str]): Directory for the CSV file. Defaults to the
                logger's directory.
            filename (Optional[str]): Filename. Default is "_llmlog.csv".
            kwargs: Additional keyword arguments for CSV file settings.

        Raises:
            ValueError: If no directory is specified here or on the logger.
        """
        dir = dir or self._logger.dir
        if dir is None:
            raise ValueError("No directory specified.")
        self._logger.to_csv(dir=dir, filename=filename, **kwargs)
File without changes
# Public API of lionagi.utils: re-export helpers from the submodules.
from .sys_util import (
    to_flat_dict, append_to_jsonl, to_list, str_to_num, make_copy, to_temp,
    to_csv, hold_call, ahold_call, l_call, al_call, m_call, am_call, e_call,
    ae_call, get_timestamp, create_path,
)
# Bug fix: `file_to_chunks` was imported (and listed in __all__) twice.
from .doc_util import dir_to_path, read_text, dir_to_files, chunk_text, file_to_chunks, get_bins
from .log_util import DataLogger
from .tool_util import ToolManager

__all__ = [
    "to_list", "str_to_num", "make_copy", "to_temp", "to_csv", "hold_call",
    "ahold_call", "l_call", "al_call", "m_call", "am_call", "e_call",
    "ae_call", "get_timestamp", "create_path", "to_flat_dict", "append_to_jsonl",
    "dir_to_path", "read_text", "dir_to_files", "chunk_text", "file_to_chunks",
    "get_bins",
    "DataLogger", "ToolManager",
]