lionagi 0.0.111__py3-none-any.whl → 0.0.113__py3-none-any.whl
File-by-file change summary for the lionagi package upgrade 0.0.111 → 0.0.113.
- lionagi/__init__.py +7 -2
- lionagi/bridge/__init__.py +7 -0
- lionagi/bridge/langchain.py +131 -0
- lionagi/bridge/llama_index.py +157 -0
- lionagi/configs/__init__.py +7 -0
- lionagi/configs/oai_configs.py +49 -0
- lionagi/configs/openrouter_config.py +49 -0
- lionagi/core/__init__.py +15 -0
- lionagi/{session/conversation.py → core/conversations.py} +10 -17
- lionagi/core/flows.py +1 -0
- lionagi/core/instruction_sets.py +1 -0
- lionagi/{session/message.py → core/messages.py} +5 -5
- lionagi/core/sessions.py +262 -0
- lionagi/datastore/__init__.py +1 -0
- lionagi/datastore/chroma.py +1 -0
- lionagi/datastore/deeplake.py +1 -0
- lionagi/datastore/elasticsearch.py +1 -0
- lionagi/datastore/lantern.py +1 -0
- lionagi/datastore/pinecone.py +1 -0
- lionagi/datastore/postgres.py +1 -0
- lionagi/datastore/qdrant.py +1 -0
- lionagi/loader/__init__.py +12 -0
- lionagi/loader/chunker.py +157 -0
- lionagi/loader/reader.py +124 -0
- lionagi/objs/__init__.py +7 -0
- lionagi/objs/messenger.py +163 -0
- lionagi/objs/tool_registry.py +247 -0
- lionagi/schema/__init__.py +11 -0
- lionagi/schema/base_condition.py +1 -0
- lionagi/schema/base_schema.py +239 -0
- lionagi/schema/base_tool.py +9 -0
- lionagi/schema/data_logger.py +94 -0
- lionagi/services/__init__.py +14 -0
- lionagi/services/anthropic.py +1 -0
- lionagi/services/anyscale.py +0 -0
- lionagi/services/azure.py +1 -0
- lionagi/{api/oai_service.py → services/base_api_service.py} +74 -148
- lionagi/services/bedrock.py +0 -0
- lionagi/services/chatcompletion.py +48 -0
- lionagi/services/everlyai.py +0 -0
- lionagi/services/gemini.py +0 -0
- lionagi/services/gpt4all.py +0 -0
- lionagi/services/huggingface.py +0 -0
- lionagi/services/litellm.py +1 -0
- lionagi/services/localai.py +0 -0
- lionagi/services/mistralai.py +0 -0
- lionagi/services/oai.py +34 -0
- lionagi/services/ollama.py +1 -0
- lionagi/services/openllm.py +0 -0
- lionagi/services/openrouter.py +32 -0
- lionagi/services/perplexity.py +0 -0
- lionagi/services/predibase.py +0 -0
- lionagi/services/rungpt.py +0 -0
- lionagi/services/service_objs.py +282 -0
- lionagi/services/vllm.py +0 -0
- lionagi/services/xinference.py +0 -0
- lionagi/structure/__init__.py +7 -0
- lionagi/structure/relationship.py +128 -0
- lionagi/structure/structure.py +160 -0
- lionagi/tests/__init__.py +0 -0
- lionagi/tests/test_flatten_util.py +426 -0
- lionagi/tools/__init__.py +0 -0
- lionagi/tools/coder.py +1 -0
- lionagi/tools/planner.py +1 -0
- lionagi/tools/prompter.py +1 -0
- lionagi/tools/sandbox.py +1 -0
- lionagi/tools/scorer.py +1 -0
- lionagi/tools/summarizer.py +1 -0
- lionagi/tools/validator.py +1 -0
- lionagi/utils/__init__.py +46 -8
- lionagi/utils/api_util.py +63 -416
- lionagi/utils/call_util.py +347 -0
- lionagi/utils/flat_util.py +540 -0
- lionagi/utils/io_util.py +102 -0
- lionagi/utils/load_utils.py +190 -0
- lionagi/utils/sys_util.py +85 -660
- lionagi/utils/tool_util.py +82 -199
- lionagi/utils/type_util.py +81 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/METADATA +44 -15
- lionagi-0.0.113.dist-info/RECORD +84 -0
- lionagi/api/__init__.py +0 -8
- lionagi/api/oai_config.py +0 -16
- lionagi/session/__init__.py +0 -7
- lionagi/session/session.py +0 -380
- lionagi/utils/doc_util.py +0 -331
- lionagi/utils/log_util.py +0 -86
- lionagi-0.0.111.dist-info/RECORD +0 -20
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/LICENSE +0 -0
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/WHEEL +0 -0
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/top_level.txt +0 -0
lionagi/session/session.py
DELETED
import asyncio
import json
import os
from typing import Any

import aiohttp

from .conversation import Conversation
from ..api.oai_config import oai_llmconfig
from ..api.oai_service import OpenAIService
from ..utils.api_util import StatusTracker
from ..utils.log_util import DataLogger
from ..utils.sys_util import to_list, l_call, al_call
from ..utils.tool_util import ToolManager

# Module-level singletons shared by every Session instance.
status_tracker = StatusTracker()
# NOTE(review): instantiated at import time and reads OPENAI_API_KEY from the
# environment as a side effect — confirm this eager construction is intended.
OAIService = OpenAIService(api_key=os.getenv('OPENAI_API_KEY'))
class Session:
    """
    A conversation session with a chat-completion AI service.

    Coordinates the message flow (via ``Conversation``), the language-model
    configuration, request/response logging, and invocation of registered
    external tools.

    Attributes:
        conversation (Conversation): Message store for the session.
        system (str): Current system prompt.
        llmconfig (dict): Default configuration for the language model.
        _logger (DataLogger): Logs request/response pairs.
        api_service: Service object used to call the model API.
        _toolmanager (ToolManager): Registry and invoker for external tools.
    """

    def __init__(self, system, dir=None, llmconfig=oai_llmconfig, api_service=OAIService):
        """
        Initialize a Session with default or provided settings.

        Parameters:
            system (str): The initial system prompt for the conversation.
            dir (Optional[str]): Directory used for logging. Default is None.
            llmconfig (Optional[dict]): Language-model configuration.
                Default is ``oai_llmconfig``.
            api_service: API service instance used for model calls.
        """
        self.conversation = Conversation()
        self.system = system
        self.llmconfig = llmconfig
        self._logger = DataLogger(dir=dir)
        self.api_service = api_service
        self._toolmanager = ToolManager()

    def set_dir(self, dir):
        """Set the directory used for logging.

        Parameters:
            dir (str): The directory path.
        """
        self._logger.dir = dir

    def set_system(self, system):
        """Replace the system prompt of the conversation.

        Parameters:
            system (str): The new system prompt.
        """
        self.conversation.change_system(system)

    def set_llmconfig(self, llmconfig):
        """Replace the language-model configuration.

        Parameters:
            llmconfig (dict): Configuration settings for the language model.
        """
        self.llmconfig = llmconfig

    def set_api_service(self, api_service):
        """Replace the API service used for model calls.

        Parameters:
            api_service: An instance of the API service.
        """
        self.api_service = api_service

    async def _output(self, invoke=True, out=True, tool_parser=None):
        """
        Post-process the latest model response.

        When ``invoke`` is True, best-effort parse the latest response as a
        JSON tool-call description, invoke the referenced tools, and append
        each result to the conversation.

        Parameters:
            invoke (bool): Whether to invoke tools found in the response.
                Default is True.
            out (bool): Whether to return the latest response content.
                Default is True.
            tool_parser (Optional[callable]): Optional post-processor applied
                to each tool output before it is recorded.

        Returns:
            Any: The latest response content when ``out`` is True.
        """
        if invoke:
            try:
                tool_uses = json.loads(self.conversation.responses[-1]['content'])
                if 'function_list' in tool_uses:
                    func_calls = l_call(tool_uses['function_list'], self._toolmanager._get_function_call)
                else:
                    func_calls = l_call(tool_uses['tool_uses'], self._toolmanager._get_function_call)

                outs = await al_call(func_calls, self._toolmanager.ainvoke)
                if tool_parser:
                    outs = l_call(outs, tool_parser)
                # BUG FIX: this loop variable used to be named ``out``, which
                # shadowed the ``out`` parameter — a falsy final tool output
                # would then suppress the return below even when the caller
                # asked for output.
                for result, f in zip(outs, func_calls):
                    response = {"function": f[0], "arguments": f[1], "output": result}
                    self.conversation.add_messages(response=response)

            except Exception:
                # Tool invocation is best-effort: a response that is not a
                # JSON tool-call description simply has nothing to invoke.
                pass

        if out:
            return self.conversation.responses[-1]['content']

    def _is_invoked(self):
        """
        Check whether the last message records a completed function call.

        Returns:
            bool: True if the last message content is a JSON object with at
            least the keys 'function', 'arguments' and 'output'.
        """
        msg = self.conversation.messages[-1]
        try:
            # dict.keys() >= {...} tests superset membership.
            return json.loads(msg['content']).keys() >= {'function', 'arguments', 'output'}
        except Exception:
            return False

    def register_tools(self, tools, funcs, update=False, new=False, prefix=None, postfix=None):
        """
        Register tools and their corresponding functions.

        Parameters:
            tools (list): Tool information dictionaries.
            funcs (list): The corresponding callables.
            update (bool): Whether to update existing functions.
            new (bool): Whether to create new registries for existing functions.
            prefix (Optional[str]): Prefix to add to the function names.
            postfix (Optional[str]): Postfix to add to the function names.
        """
        funcs = to_list(funcs)
        self._toolmanager.register_tools(tools, funcs, update, new, prefix, postfix)

    async def initiate(self, instruction, system=None, context=None, name=None, invoke=True, out=True, tool_parser=None, **kwargs) -> Any:
        """
        Start a new conversation with the provided instruction.

        Parameters:
            instruction (Union[str, dict]): The opening instruction.
            system (Optional[str]): System prompt override. Default is None
                (uses ``self.system``).
            context (Optional[dict]): Additional context for the instruction.
            name (Optional[str]): Name associated with the instruction.
            invoke (bool): Whether to invoke tools found in the response.
            out (bool): Whether to return the response content.
            tool_parser (Optional[callable]): Custom tool-output parser.
            **kwargs: Extra configuration merged over ``self.llmconfig``.

        Returns:
            Any: The processed output.
        """
        config = {**self.llmconfig, **kwargs}
        system = system or self.system
        self.conversation.initiate_conversation(system=system, instruction=instruction, context=context, name=name)
        await self._call_chatcompletion(**config)

        return await self._output(invoke, out, tool_parser)

    async def followup(self, instruction, system=None, context=None, out=True, name=None, invoke=True, tool_parser=None, **kwargs) -> Any:
        """
        Continue the conversation with the provided instruction.

        Parameters:
            instruction (Union[str, dict]): The follow-up instruction.
            system (Optional[str]): If given, replaces the system prompt first.
            context (Optional[dict]): Additional context for the instruction.
            out (bool): Whether to return the response content.
            name (Optional[str]): Name associated with the instruction.
            invoke (bool): Whether to invoke tools found in the response.
            tool_parser (Optional[callable]): Custom tool-output parser.
            **kwargs: Extra configuration merged over ``self.llmconfig``.

        Returns:
            Any: The processed output.
        """
        if system:
            self.conversation.change_system(system)
        self.conversation.add_messages(instruction=instruction, context=context, name=name)
        config = {**self.llmconfig, **kwargs}
        await self._call_chatcompletion(**config)

        return await self._output(invoke, out, tool_parser)

    async def auto_followup(self, instruct, num=3, tool_parser=None, **kwargs):
        """
        Automate follow-ups until tools stop being invoked or ``num`` runs out.

        Parameters:
            instruct (Union[str, dict]): The instruction for each follow-up.
            num (int): Maximum number of automatic follow-ups. Defaults to 3.
            tool_parser (Optional[callable]): Custom tool-output parser.
            **kwargs: Extra keyword arguments passed to ``followup``.
        """
        cont_ = True
        while num > 0 and cont_:
            await self.followup(instruct, tool_parser=tool_parser, tool_choice="auto", **kwargs)
            num -= 1
            cont_ = bool(self._is_invoked())
        # If the loop exhausted its budget, issue one final follow-up without
        # forcing tool choice, letting the model conclude.
        if num == 0:
            await self.followup(instruct, **kwargs)

    def _create_payload_chatcompletion(self, **kwargs):
        """
        Build a chat-completion request payload from the current state.

        Parameters:
            **kwargs: Extra configuration merged over ``self.llmconfig``.

        Returns:
            dict: The payload for the chat-completion request.
        """
        # currently only openai chat completions are supported
        messages = self.conversation.messages
        config = {**self.llmconfig, **kwargs}
        payload = {
            "messages": messages,
            "model": config.get('model'),
            "frequency_penalty": config.get('frequency_penalty'),
            "n": config.get('n'),
            "presence_penalty": config.get('presence_penalty'),
            "response_format": config.get('response_format'),
            "temperature": config.get('temperature'),
            "top_p": config.get('top_p'),
        }

        # Optional keys are included only when set to a meaningful value.
        # ROBUSTNESS: use .get() so an absent key no longer raises KeyError.
        for key in ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"]:
            value = config.get(key)
            if bool(value) is True and str(value) != "none":
                payload.update({key: value})
        return payload

    async def _call_chatcompletion(self, sleep=0.1, **kwargs):
        """
        Call the chat-completion API and record the response.

        Parameters:
            sleep (float): Pause after a successful call. Default is 0.1.
            **kwargs: Extra configuration for the payload.

        Raises:
            Exception: Re-raises any error from the API call after counting
            the task as failed in the module-level status tracker.
        """
        endpoint = "chat/completions"
        try:
            async with aiohttp.ClientSession() as session:
                payload = self._create_payload_chatcompletion(**kwargs)
                completion = await self.api_service.call_api(
                    session, endpoint, payload)
                if "choices" in completion:
                    # DataLogger instances are callable and record the pair.
                    self._logger({"input": payload, "output": completion})
                    self.conversation.add_messages(response=completion['choices'][0])
                    self.conversation.responses.append(self.conversation.messages[-1])
                    self.conversation.response_counts += 1
                    await asyncio.sleep(sleep)
                    status_tracker.num_tasks_succeeded += 1
                else:
                    status_tracker.num_tasks_failed += 1
        except Exception:
            status_tracker.num_tasks_failed += 1
            raise  # bare raise preserves the original traceback

    def messages_to_csv(self, dir=None, filename="_messages.csv", **kwargs):
        """
        Save conversation messages to a CSV file.

        Parameters:
            dir (Optional[str]): Output directory. Defaults to the logger's
                directory.
            filename (Optional[str]): Output filename. Default "_messages.csv".
            **kwargs: Additional CSV writer settings.

        Raises:
            ValueError: If no directory is configured anywhere.
        """
        dir = dir or self._logger.dir
        if dir is None:
            raise ValueError("No directory specified.")
        self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)

    def log_to_csv(self, dir=None, filename="_llmlog.csv", **kwargs):
        """
        Save the request/response log to a CSV file.

        Parameters:
            dir (Optional[str]): Output directory. Defaults to the logger's
                directory.
            filename (Optional[str]): Output filename. Default "_llmlog.csv".
            **kwargs: Additional CSV writer settings.

        Raises:
            ValueError: If no directory is configured anywhere.
        """
        dir = dir or self._logger.dir
        if dir is None:
            raise ValueError("No directory specified.")
        self._logger.to_csv(dir=dir, filename=filename, **kwargs)