lionagi 0.0.114__py3-none-any.whl → 0.0.116__py3-none-any.whl
- lionagi/__init__.py +7 -4
- lionagi/bridge/__init__.py +19 -4
- lionagi/bridge/langchain.py +23 -3
- lionagi/bridge/llama_index.py +5 -3
- lionagi/configs/__init__.py +1 -1
- lionagi/configs/oai_configs.py +88 -1
- lionagi/core/__init__.py +6 -9
- lionagi/core/conversations/__init__.py +5 -0
- lionagi/core/conversations/conversation.py +107 -0
- lionagi/core/flows/__init__.py +8 -0
- lionagi/core/flows/flow.py +8 -0
- lionagi/core/flows/flow_util.py +62 -0
- lionagi/core/instruction_set/__init__.py +5 -0
- lionagi/core/instruction_set/instruction_sets.py +7 -0
- lionagi/core/sessions/__init__.py +5 -0
- lionagi/core/sessions/sessions.py +187 -0
- lionagi/endpoints/__init__.py +5 -0
- lionagi/endpoints/assistants.py +0 -0
- lionagi/endpoints/audio.py +17 -0
- lionagi/endpoints/chatcompletion.py +54 -0
- lionagi/endpoints/embeddings.py +0 -0
- lionagi/endpoints/finetune.py +0 -0
- lionagi/endpoints/image.py +0 -0
- lionagi/endpoints/moderation.py +0 -0
- lionagi/endpoints/vision.py +0 -0
- lionagi/{loader → loaders}/__init__.py +7 -1
- lionagi/{loader → loaders}/chunker.py +6 -12
- lionagi/{utils/load_utils.py → loaders/load_util.py} +47 -6
- lionagi/{loader → loaders}/reader.py +4 -12
- lionagi/messages/__init__.py +11 -0
- lionagi/messages/instruction.py +15 -0
- lionagi/messages/message.py +110 -0
- lionagi/messages/response.py +33 -0
- lionagi/messages/system.py +12 -0
- lionagi/objs/__init__.py +10 -6
- lionagi/objs/abc_objs.py +39 -0
- lionagi/objs/async_queue.py +135 -0
- lionagi/objs/messenger.py +70 -148
- lionagi/objs/status_tracker.py +37 -0
- lionagi/objs/{tool_registry.py → tool_manager.py} +8 -6
- lionagi/schema/__init__.py +3 -3
- lionagi/schema/base_node.py +251 -0
- lionagi/schema/base_tool.py +8 -3
- lionagi/schema/data_logger.py +2 -3
- lionagi/schema/data_node.py +37 -0
- lionagi/services/__init__.py +1 -4
- lionagi/services/base_api_service.py +15 -5
- lionagi/services/oai.py +2 -2
- lionagi/services/openrouter.py +2 -3
- lionagi/structures/graph.py +96 -0
- lionagi/{structure → structures}/relationship.py +10 -2
- lionagi/structures/structure.py +102 -0
- lionagi/tests/test_api_util.py +46 -0
- lionagi/tests/test_call_util.py +115 -0
- lionagi/tests/test_convert_util.py +202 -0
- lionagi/tests/test_encrypt_util.py +33 -0
- lionagi/tests/{test_flatten_util.py → test_flat_util.py} +1 -1
- lionagi/tests/test_io_util.py +0 -0
- lionagi/tests/test_sys_util.py +0 -0
- lionagi/tools/__init__.py +5 -0
- lionagi/tools/tool_util.py +7 -0
- lionagi/utils/__init__.py +55 -35
- lionagi/utils/api_util.py +19 -17
- lionagi/utils/call_util.py +2 -1
- lionagi/utils/convert_util.py +229 -0
- lionagi/utils/encrypt_util.py +16 -0
- lionagi/utils/flat_util.py +38 -0
- lionagi/utils/io_util.py +2 -2
- lionagi/utils/sys_util.py +45 -10
- lionagi/version.py +1 -1
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/METADATA +2 -2
- lionagi-0.0.116.dist-info/RECORD +110 -0
- lionagi/core/conversations.py +0 -108
- lionagi/core/flows.py +0 -1
- lionagi/core/instruction_sets.py +0 -1
- lionagi/core/messages.py +0 -166
- lionagi/core/sessions.py +0 -297
- lionagi/schema/base_schema.py +0 -252
- lionagi/services/chatcompletion.py +0 -48
- lionagi/services/service_objs.py +0 -282
- lionagi/structure/structure.py +0 -160
- lionagi/tools/coder.py +0 -1
- lionagi/tools/sandbox.py +0 -1
- lionagi/utils/tool_util.py +0 -92
- lionagi/utils/type_util.py +0 -81
- lionagi-0.0.114.dist-info/RECORD +0 -84
- /lionagi/configs/{openrouter_config.py → openrouter_configs.py} +0 -0
- /lionagi/{datastore → datastores}/__init__.py +0 -0
- /lionagi/{datastore → datastores}/chroma.py +0 -0
- /lionagi/{datastore → datastores}/deeplake.py +0 -0
- /lionagi/{datastore → datastores}/elasticsearch.py +0 -0
- /lionagi/{datastore → datastores}/lantern.py +0 -0
- /lionagi/{datastore → datastores}/pinecone.py +0 -0
- /lionagi/{datastore → datastores}/postgres.py +0 -0
- /lionagi/{datastore → datastores}/qdrant.py +0 -0
- /lionagi/{structure → structures}/__init__.py +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/LICENSE +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/WHEEL +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/top_level.txt +0 -0
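
Several modules were renamed in this release (`loader → loaders`, `structure → structures`, `datastore → datastores`, `objs/tool_registry.py → objs/tool_manager.py`, `utils/load_utils.py → loaders/load_util.py`). A minimal import-migration sketch follows; the paths come from the file list above, while the assumption that each symbol kept its name across the rename is ours:

```python
# Import paths after upgrading to 0.0.116 (old 0.0.114 path in each comment).
# Assumption: symbol and module contents are unchanged across the rename.

from lionagi.objs.tool_manager import ToolManager  # was: lionagi.objs.tool_registry
import lionagi.loaders.chunker                     # was: lionagi.loader.chunker
import lionagi.loaders.load_util                   # was: lionagi.utils.load_utils
import lionagi.structures.relationship             # was: lionagi.structure.relationship
import lionagi.datastores.chroma                   # was: lionagi.datastore.chroma
```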
lionagi/core/sessions.py
DELETED
```diff
@@ -1,297 +0,0 @@
-import json
-from typing import Any
-from dotenv import load_dotenv
-
-from ..schema import DataLogger
-from ..utils import lcall, alcall
-from ..services import OpenAIService, ChatCompletion
-from ..core.conversations import Conversation
-from ..objs.tool_registry import ToolManager
-from ..configs.oai_configs import oai_schema
-
-load_dotenv()
-OAIService = OpenAIService()
-
-
-class Session:
-    """
-    The Session class is responsible for managing a conversation session with a given system,
-    handling the logging of data, and invoking tools as part of the conversation.
-
-    Attributes:
-        conversation (Conversation):
-            An object to manage the conversation flow and history.
-        system (str):
-            The name of the system with which the conversation is happening.
-        llmconfig (dict):
-            Configuration for the language model.
-        logger_ (DataLogger):
-            An object for logging conversation data.
-        service (OpenAIService):
-            A service object for interacting with OpenAI APIs.
-        tool_manager (ToolManager):
-            An object to manage the registration and invocation of tools.
-    """
-
-    def __init__(
-        self, system, dir=None, llmconfig=oai_schema['chat']['config'],
-        service=OAIService
-    ):
-        """
-        Initializes the Session object.
-
-        Parameters:
-            system (str): The name of the system with which the session is initiated.
-
-            dir (str, optional): The directory for saving logs. Defaults to None.
-
-            llmconfig (dict): Configuration for the language model. Defaults to chat config schema.
-
-            service (OpenAIService): The service object for API interactions. Defaults to an instance of OpenAIService.
-        """
-
-        self.conversation = Conversation()
-        self.system = system
-        self.llmconfig = llmconfig
-        self.logger_ = DataLogger(dir=dir)
-        self.service = service
-        self.tool_manager = ToolManager()
-
-    def set_dir(self, dir):
-        """
-        Sets the directory where data logs should be saved.
-
-        Parameters:
-            dir (str): The path to the directory for saving logs.
-        """
-        self.logger_.dir = dir
-
-    def set_system(self, system):
-        """
-        Changes the system associated with the conversation.
-
-        Parameters:
-            system (str): The name of the new system for the conversation.
-        """
-        self.conversation.change_system(system)
-
-    def set_llmconfig(self, llmconfig):
-        """
-        Updates the language model configuration.
-
-        Parameters:
-            llmconfig (dict): The new configuration for the language model.
-        """
-        self.llmconfig = llmconfig
-
-    def set_service(self, service):
-        """
-        Sets the service object used for API interactions.
-
-        Parameters:
-            service (OpenAIService): The new service object.
-        """
-        self.service = service
-
-    async def _output(self, invoke=True, out=True):
-        """
-        Processes the output from the conversation, possibly invoking tools and returning the latest response.
-
-        Parameters:
-            invoke (bool): Indicates whether to invoke tools based on the latest response. Defaults to True.
-
-            out (bool): Determines whether to return the latest response content. Defaults to True.
-
-        Returns:
-            The content of the latest response if out is True. Otherwise, returns None.
-        """
-        if invoke:
-            try:
-                # func, args = self.tool_manager._get_function_call(self.conversation.responses[-1]['content'])
-                # outs = await self.tool_manager.invoke(func, args)
-                # self.conversation.add_messages(response=outs)
-
-                tool_uses = json.loads(self.conversation.responses[-1]['content'])
-                if 'function_list' in tool_uses.keys():
-                    func_calls = lcall(tool_uses['function_list'], self.tool_manager._get_function_call)
-                else:
-                    func_calls = lcall(tool_uses['tool_uses'], self.tool_manager._get_function_call)
-
-                outs = await alcall(func_calls, self.tool_manager.invoke)
-                for out, f in zip(outs, func_calls):
-                    response = {"function": f[0], "arguments": f[1], "output": out}
-                    self.conversation.add_messages(response=response)
-
-            except:
-                pass
-        if out:
-            return self.conversation.responses[-1]['content']
-
-    def _is_invoked(self):
-        """
-        Checks if the last message in the conversation indicates a function call result.
-
-        Returns:
-            bool: True if the last message is a function call result, False otherwise.
-        """
-        msg = self.conversation.messages[-1]
-        try:
-            if json.loads(msg['content']).keys() >= {'function', 'arguments', 'output'}:
-                return True
-        except:
-            return False
-
-    def register_tools(self, tools): #, update=False, new=False, prefix=None, postfix=None):
-        """
-        Registers a list of tools to the tool manager and updates the language model configuration.
-
-        Parameters:
-            tools: A single tool or a list of tools to be registered.
-
-            update (bool): If True, update existing tools. Defaults to False.
-
-            new (bool): If True, add as new tools. Defaults to False.
-
-            prefix: A prefix added to all tool names. Defaults to None.
-
-            postfix: A postfix added to all tool names. Defaults to None.
-        """
-        if not isinstance(tools, list):
-            tools=[tools]
-        self.tool_manager.register_tools(tools=tools) #, update=update, new=new, prefix=prefix, postfix=postfix)
-        # tools_schema = lcall(tools, lambda tool: tool.to_dict()['schema_'])
-        # if self.llmconfig['tools'] is None:
-        #     self.llmconfig['tools'] = tools_schema
-        # else:
-        #     self.llmconfig['tools'] += tools_schema
-
-    async def initiate(self, instruction, system=None, context=None,
-                       name=None, invoke=True, out=True, **kwargs) -> Any:
-        """
-        Initiates a conversation with an instruction and possibly additional context.
-
-        Parameters:
-            instruction (str): The initial instruction for the conversation.
-
-            system (str, optional): The name of the system to be used. If None, defaults to current system.
-
-            context (str, optional): Additional context for the conversation. Defaults to None.
-
-            name (str, optional): The name associated with the conversation. Defaults to None.
-
-            invoke (bool): Indicates whether to invoke tools. Defaults to True.
-
-            out (bool): Determines whether to return the latest response content. Defaults to True.
-
-            **kwargs: Additional keyword arguments for language model configuration.
-
-        Returns:
-            The output of the conversation if out is True, otherwise None.
-        """
-        if self.tool_manager.registry != {}:
-            if 'tools' not in kwargs:
-                tool_kwarg = {"tools": self.tool_manager.to_tool_schema()}
-                kwargs = {**tool_kwarg, **kwargs}
-
-        config = {**self.llmconfig, **kwargs}
-        system = system or self.system
-        self.conversation.initiate_conversation(system=system, instruction=instruction, context=context, name=name)
-        await self.call_chatcompletion(**config)
-
-        return await self._output(invoke, out)
-
-    async def followup(self, instruction, system=None, context=None,
-                       out=True, name=None, invoke=True, **kwargs) -> Any:
-        """
-        Continues the conversation with a follow-up instruction.
-
-        Parameters:
-            instruction (str): The follow-up instruction for the conversation.
-
-            system (str, optional): The name of the system to be used. If None, defaults to current system.
-
-            context (str, optional): Additional context for the conversation. Defaults to None.
-
-            out (bool): Determines whether to return the latest response content. Defaults to True.
-
-            name (str, optional): The name associated with the conversation. Defaults to None.
-
-            invoke (bool): Indicates whether to invoke tools. Defaults to True.
-
-            **kwargs: Additional keyword arguments for language model configuration.
-
-        Returns:
-            The output of the conversation if out is True, otherwise None.
-        """
-        if system:
-            self.conversation.change_system(system)
-        self.conversation.add_messages(instruction=instruction, context=context, name=name)
-        if self.tool_manager.registry != {}:
-            if 'tools' not in kwargs:
-                tool_kwarg = {"tools": self.tool_manager.to_tool_schema()}
-                kwargs = {**tool_kwarg, **kwargs}
-        config = {**self.llmconfig, **kwargs}
-        await self.call_chatcompletion(**config)
-
-        return await self._output(invoke, out)
-
-    async def auto_followup(self, instruct, num=3, **kwargs):
-        """
-        Automatically generates follow-up messages based on whether the last response invoked a tool.
-
-        Parameters:
-            instruct (str): The instruction to pass for follow-up.
-
-            num (int): The number of follow-ups to attempt. Defaults to 3.
-
-            **kwargs: Additional keyword arguments for the follow-up process.
-        """
-        if self.tool_manager.registry != {}:
-            if 'tools' not in kwargs:
-                tool_kwarg = {"tools": self.tool_manager.to_tool_schema()}
-                kwargs = {**tool_kwarg, **kwargs}
-
-        cont_ = True
-        while num > 0 and cont_ is True:
-            await self.followup(instruct, tool_choice="auto", **kwargs)
-            num -= 1
-            cont_ = True if self._is_invoked() else False
-        if num == 0:
-            await self.followup(instruct, **kwargs)
-
-    def messages_to_csv(self, dir=None, filename="messages.csv", **kwargs):
-        """
-        Exports the conversation messages to a CSV file.
-
-        Parameters:
-            dir (str, optional): The directory where the CSV should be saved. Defaults to the logger's directory.
-
-            filename (str): The name of the CSV file. Defaults to "messages.csv".
-
-            **kwargs: Additional keyword arguments passed to the CSV writing function.
-
-        Raises:
-            ValueError: If no directory is specified.
-        """
-        dir = dir or self.logger_.dir
-        if dir is None:
-            raise ValueError("No directory specified.")
-        self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)
-
-    def log_to_csv(self, dir=None, filename="llmlog.csv", **kwargs):
-        dir = dir or self.logger_.dir
-        if dir is None:
-            raise ValueError("No directory specified.")
-        self.logger_.to_csv(dir=dir, filename=filename, **kwargs)
-
-    async def call_chatcompletion(self, schema=oai_schema['chat'], **kwargs):
-        payload = ChatCompletion.create_payload(messages=self.conversation.messages, schema=schema, llmconfig=self.llmconfig,**kwargs)
-        completion = await self.service.serve(payload=payload)
-        if "choices" in completion:
-            self.logger_({"input":payload, "output": completion})
-            self.conversation.add_messages(response=completion['choices'][0])
-            self.conversation.responses.append(self.conversation.messages[-1])
-            self.conversation.response_counts += 1
-            self.service.status_tracker.num_tasks_succeeded += 1
-        else:
-            self.service.status_tracker.num_tasks_failed += 1
```
lionagi/schema/base_schema.py
DELETED
```diff
@@ -1,252 +0,0 @@
-import json
-from typing import Any, Dict, Optional, TypeVar, Type, List, Callable, Union
-from pydantic import BaseModel, Field, AliasChoices
-
-from ..utils.sys_util import create_id
-
-T = TypeVar('T', bound='BaseNode')
-
-
-class BaseNode(BaseModel):
-    """
-    BaseNode: A foundational building block for representing a node in a graph-like structure.
-
-    Attributes:
-        id_ (str):
-            Unique identifier for the node, aliased as 'node_id'.
-        content (Optional[Any]):
-            Content or value the node represents.
-        metadata (Dict[str, Any]):
-            A dictionary of metadata related to the node.
-        label (Optional[str]):
-            A label for categorizing or identifying the node.
-        related_nodes (List[str]):
-            A list of identifiers for nodes related to this node.
-    """
-    id_: str = Field(default_factory=lambda: str(create_id()), alias="node_id")
-    content: Union[str, Dict[str, Any], None, Any] = Field(default=None,
-        validation_alias=AliasChoices('text', 'page_content', 'chunk_content'))
-    metadata: Dict[str, Any] = Field(default_factory=dict)
-    label: Optional[str] = None
-    related_nodes: List[str] = Field(default_factory=list)
-
-    class Config:
-        extra = 'allow'
-        populate_by_name = True
-        validate_assignment = True
-        str_strip_whitespace = True
-
-    def to_json(self) -> str:
-        """Converts the node instance into JSON string representation."""
-        return self.model_dump_json(by_alias=True)
-
-    @classmethod
-    def from_json(cls: Type[T], json_str: str, **kwargs) -> T:
-        """
-        Creates a node instance from a JSON string.
-
-        Parameters:
-            json_str (str): The JSON string representing a node.
-
-            **kwargs: Additional keyword arguments to pass to json.loads.
-
-        Returns:
-            An instance of BaseNode.
-
-        Raises:
-            ValueError: If the provided string is not valid JSON.
-        """
-        try:
-            data = json.loads(json_str, **kwargs)
-            return cls(**data)
-        except json.JSONDecodeError as e:
-            raise ValueError("Invalid JSON string provided for deserialization.") from e
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Converts the node instance into a dictionary representation."""
-        return self.model_dump(by_alias=True)
-
-    @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> T:
-        """Creates a node instance from a dictionary."""
-        return cls(**data)
-
-    def copy(self, deep: bool = True, n: int = 1) -> T:
-        """
-        Creates a copy of the node instance.
-
-        Parameters:
-            deep (bool): Whether to make a deep copy.
-
-            n (int): Number of copies to create.
-
-        Returns:
-            A copy or list of copies of the BaseNode instance.
-        """
-        copies = [self.copy(deep=deep) for _ in range(n)]
-        return copies[0] if n == 1 else copies
-
-    def merge_metadata(self, other_metadata: Dict[str, Any], overwrite: bool = True) -> None:
-        """
-        Merges another metadata dictionary into the node's metadata.
-
-        Parameters:
-            other_metadata (Dict[str, Any]): The metadata to merge in.
-
-            overwrite (bool): Whether to overwrite existing keys in the metadata.
-        """
-        if not overwrite:
-            other_metadata = {k: v for k, v in other_metadata.items() if k not in self.metadata}
-        self.metadata.update(other_metadata)
-
-    def set_meta(self, metadata_: Dict[str, Any]) -> None:
-        self.metadata = metadata_
-
-    def get_meta(self) -> Dict[str, Any]:
-        return self.metadata
-
-    def set_content(self, content: Optional[Any]) -> None:
-        self.content = content
-
-    def get_content(self) -> Optional[Any]:
-        return self.content
-
-    def set_id(self, id_: str) -> None:
-        self.id_ = id_
-
-    def get_id(self) -> str:
-        return self.id_
-
-    def update_meta(self, **kwargs) -> None:
-        self.metadata.update(kwargs)
-
-    def add_related_node(self, node_id: str) -> None:
-        if node_id not in self.related_nodes:
-            self.related_nodes.append(node_id)
-
-    def remove_related_node(self, node_id: str) -> None:
-        self.related_nodes = [id_ for id_ in self.related_nodes if id_ != node_id]
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, T):
-            return NotImplemented
-        return self.model_dump() == other.model_dump()
-
-    # def __str__(self) -> str:
-    #     """Returns a simple string representation of the BaseNode."""
-    #     return f"BaseNode(id={self.id_}, label={self.label})"
-
-    # def __repr__(self) -> str:
-    #     """Returns a detailed string representation of the BaseNode."""
-    #     return f"BaseNode(id={self.id_}, content={self.content}, metadata={self.metadata}, label={self.label})"
-
-    # Utility Methods
-    def is_empty(self) -> bool:
-        return not self.content and not self.metadata
-
-    def has_label(self, label: str) -> bool:
-        return self.label == label
-
-    def is_metadata_key_present(self, key: str) -> bool:
-        return key in self.metadata
-
-
-class DataNode(BaseNode):
-
-    def to_llama_index(self, **kwargs):
-        # to llama_index textnode
-        from lionagi.bridge.llama_index import to_llama_index_textnode
-        return to_llama_index_textnode(self, **kwargs)
-
-    def to_langchain(self, **kwargs):
-        # to langchain document
-        from lionagi.bridge.langchain import to_langchain_document
-        return to_langchain_document(self, **kwargs)
-
-    @classmethod
-    def from_llama_index(cls, llama_node: Any, **kwargs):
-        llama_dict = llama_node.to_dict(**kwargs)
-        return cls.from_dict(llama_dict)
-
-    @classmethod
-    def from_langchain(cls, lc_doc: Any):
-        info_json = lc_doc.to_json()
-        info_node = {'lc_id': info_json['id']}
-        info_node = {**info_node, **info_json['kwargs']}
-        return cls(**info_node)
-
-    # def __repr__(self) -> str:
-    #     return f"DataNode(id={self.id_}, content={self.content}, metadata={self.metadata}, label={self.label})"
-
-    # def __str__(self) -> str:
-    #     return f"DataNode(id={self.id_}, label={self.label})"
-
-
-class File(DataNode):
-
-    ...
-
-
-class Chunk(DataNode):
-
-    ...
-
-
-class Message(BaseNode):
-    """
-    Message: A specialized type of BaseNode for handling messages.
-
-    This class represents a message node, extending the BaseNode with additional
-    attributes specific to messages, such as role and name, and provides methods
-    for message-specific operations.
-
-    Attributes:
-        role (Optional[str]):
-            The role of the message, e.g., 'sender', 'receiver'.
-        name (Optional[str]):
-            The name associated with the message, e.g., a user name or system name.
-    """
-
-    role: Optional[str] = None
-    name: Optional[str] = None
-
-    def _to_message(self):
-        """
-        Converts the message node to a dictionary representation suitable for messaging purposes.
-
-        The method serializes the content attribute to a JSON string if it is a dictionary.
-        Otherwise, it keeps the content as is.
-
-        Returns:
-            A dictionary representing the message with 'role' and 'content' keys.
-        """
-        out = {
-            "role": self.role,
-            "content": json.dumps(self.content) if isinstance(self.content, dict) else self.content
-        }
-        return out
-
-    def _create_role_message(self, role_: str,
-                             content: Any,
-                             content_key: str,
-                             name: Optional[str] = None
-                             ) -> None:
-        """
-        Creates a message with a specific role, content, and an optional name.
-
-        This method sets up the message node with the specified role, content, and name. The content
-        is stored in a dictionary under the provided content_key.
-
-        Parameters:
-            role_ (str): The role of the message.
-
-            content (Any): The content of the message.
-
-            content_key (str): The key under which the content will be stored.
-
-            name (Optional[str]): The name associated with the message. Defaults to the role if not provided.
-        """
-        self.role = role_
-        self.content = {content_key: content}
-        self.name = name or role_
-
```
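
The serialization helpers on the removed `BaseNode` compose into a simple round trip. A minimal sketch, using only methods visible in the deleted source; note that `id_` serializes under its `node_id` alias, and that `__eq__` as written checks `isinstance(other, T)` against a TypeVar, so direct `==` comparison would raise at runtime:

```python
from lionagi.schema.base_schema import BaseNode  # module path as of 0.0.114

node = BaseNode(content="hello", label="greeting")
node.update_meta(source="example")       # merges kwargs into metadata
node.add_related_node("some-other-id")

payload = node.to_json()                 # dumps by alias: the id key is "node_id"
restored = BaseNode.from_json(payload)   # wraps JSONDecodeError in ValueError

assert restored.get_content() == "hello"
assert restored.is_metadata_key_present("source")
assert restored.has_label("greeting")
```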
lionagi/services/chatcompletion.py
DELETED
```diff
@@ -1,48 +0,0 @@
-import abc
-
-
-class BaseEndpoint(abc.ABC):
-    endpoint: str = abc.abstractproperty()
-
-    @abc.abstractmethod
-    def create_payload(self, **kwargs):
-        """
-        Create a payload for the request based on configuration.
-
-        Parameters:
-            **kwargs: Additional keyword arguments for configuration.
-
-        Returns:
-            dict: The payload for the request.
-        """
-        pass
-
-    @abc.abstractmethod
-    def process_response(self, response):
-        """
-        Process the response from the API call.
-
-        Parameters:
-            response: The response to process.
-        """
-        pass
-
-
-class ChatCompletion(BaseEndpoint):
-    endpoint: str = "chat/completion"
-
-    @classmethod
-    def create_payload(scls, messages, llmconfig, schema, **kwargs):
-        config = {**llmconfig, **kwargs}
-        payload = {"messages": messages}
-        for key in schema['required']:
-            payload.update({key: config[key]})
-
-        for key in schema['optional']:
-            if bool(config[key]) is True and str(config[key]).lower() != "none":
-                payload.update({key: config[key]})
-        return payload
-
-    def process_response(self, session, payload, completion):
-        ...
-
```
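
The removed `ChatCompletion.create_payload` simply overlays call-time kwargs onto `llmconfig`, then copies schema-listed keys into the payload. A minimal sketch with a toy schema — the real schema lived in `lionagi/configs/oai_configs.py`, so the keys below are illustrative:

```python
from lionagi.services.chatcompletion import ChatCompletion  # path as of 0.0.114

schema = {
    "required": ["model"],                # always copied into the payload
    "optional": ["temperature", "stop"],  # copied only if truthy and not "none"
}
llmconfig = {"model": "gpt-4", "temperature": 0.7, "stop": None}

payload = ChatCompletion.create_payload(
    messages=[{"role": "user", "content": "hi"}],
    llmconfig=llmconfig,
    schema=schema,
    temperature=0.2,                      # call-time kwargs override llmconfig
)
# payload == {"messages": [...], "model": "gpt-4", "temperature": 0.2}
# "stop" is dropped because bool(None) is False
```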