lionagi 0.0.114__py3-none-any.whl → 0.0.116__py3-none-any.whl
- lionagi/__init__.py +7 -4
- lionagi/bridge/__init__.py +19 -4
- lionagi/bridge/langchain.py +23 -3
- lionagi/bridge/llama_index.py +5 -3
- lionagi/configs/__init__.py +1 -1
- lionagi/configs/oai_configs.py +88 -1
- lionagi/core/__init__.py +6 -9
- lionagi/core/conversations/__init__.py +5 -0
- lionagi/core/conversations/conversation.py +107 -0
- lionagi/core/flows/__init__.py +8 -0
- lionagi/core/flows/flow.py +8 -0
- lionagi/core/flows/flow_util.py +62 -0
- lionagi/core/instruction_set/__init__.py +5 -0
- lionagi/core/instruction_set/instruction_sets.py +7 -0
- lionagi/core/sessions/__init__.py +5 -0
- lionagi/core/sessions/sessions.py +187 -0
- lionagi/endpoints/__init__.py +5 -0
- lionagi/endpoints/assistants.py +0 -0
- lionagi/endpoints/audio.py +17 -0
- lionagi/endpoints/chatcompletion.py +54 -0
- lionagi/endpoints/embeddings.py +0 -0
- lionagi/endpoints/finetune.py +0 -0
- lionagi/endpoints/image.py +0 -0
- lionagi/endpoints/moderation.py +0 -0
- lionagi/endpoints/vision.py +0 -0
- lionagi/{loader → loaders}/__init__.py +7 -1
- lionagi/{loader → loaders}/chunker.py +6 -12
- lionagi/{utils/load_utils.py → loaders/load_util.py} +47 -6
- lionagi/{loader → loaders}/reader.py +4 -12
- lionagi/messages/__init__.py +11 -0
- lionagi/messages/instruction.py +15 -0
- lionagi/messages/message.py +110 -0
- lionagi/messages/response.py +33 -0
- lionagi/messages/system.py +12 -0
- lionagi/objs/__init__.py +10 -6
- lionagi/objs/abc_objs.py +39 -0
- lionagi/objs/async_queue.py +135 -0
- lionagi/objs/messenger.py +70 -148
- lionagi/objs/status_tracker.py +37 -0
- lionagi/objs/{tool_registry.py → tool_manager.py} +8 -6
- lionagi/schema/__init__.py +3 -3
- lionagi/schema/base_node.py +251 -0
- lionagi/schema/base_tool.py +8 -3
- lionagi/schema/data_logger.py +2 -3
- lionagi/schema/data_node.py +37 -0
- lionagi/services/__init__.py +1 -4
- lionagi/services/base_api_service.py +15 -5
- lionagi/services/oai.py +2 -2
- lionagi/services/openrouter.py +2 -3
- lionagi/structures/graph.py +96 -0
- lionagi/{structure → structures}/relationship.py +10 -2
- lionagi/structures/structure.py +102 -0
- lionagi/tests/test_api_util.py +46 -0
- lionagi/tests/test_call_util.py +115 -0
- lionagi/tests/test_convert_util.py +202 -0
- lionagi/tests/test_encrypt_util.py +33 -0
- lionagi/tests/{test_flatten_util.py → test_flat_util.py} +1 -1
- lionagi/tests/test_io_util.py +0 -0
- lionagi/tests/test_sys_util.py +0 -0
- lionagi/tools/__init__.py +5 -0
- lionagi/tools/tool_util.py +7 -0
- lionagi/utils/__init__.py +55 -35
- lionagi/utils/api_util.py +19 -17
- lionagi/utils/call_util.py +2 -1
- lionagi/utils/convert_util.py +229 -0
- lionagi/utils/encrypt_util.py +16 -0
- lionagi/utils/flat_util.py +38 -0
- lionagi/utils/io_util.py +2 -2
- lionagi/utils/sys_util.py +45 -10
- lionagi/version.py +1 -1
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/METADATA +2 -2
- lionagi-0.0.116.dist-info/RECORD +110 -0
- lionagi/core/conversations.py +0 -108
- lionagi/core/flows.py +0 -1
- lionagi/core/instruction_sets.py +0 -1
- lionagi/core/messages.py +0 -166
- lionagi/core/sessions.py +0 -297
- lionagi/schema/base_schema.py +0 -252
- lionagi/services/chatcompletion.py +0 -48
- lionagi/services/service_objs.py +0 -282
- lionagi/structure/structure.py +0 -160
- lionagi/tools/coder.py +0 -1
- lionagi/tools/sandbox.py +0 -1
- lionagi/utils/tool_util.py +0 -92
- lionagi/utils/type_util.py +0 -81
- lionagi-0.0.114.dist-info/RECORD +0 -84
- /lionagi/configs/{openrouter_config.py → openrouter_configs.py} +0 -0
- /lionagi/{datastore → datastores}/__init__.py +0 -0
- /lionagi/{datastore → datastores}/chroma.py +0 -0
- /lionagi/{datastore → datastores}/deeplake.py +0 -0
- /lionagi/{datastore → datastores}/elasticsearch.py +0 -0
- /lionagi/{datastore → datastores}/lantern.py +0 -0
- /lionagi/{datastore → datastores}/pinecone.py +0 -0
- /lionagi/{datastore → datastores}/postgres.py +0 -0
- /lionagi/{datastore → datastores}/qdrant.py +0 -0
- /lionagi/{structure → structures}/__init__.py +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/LICENSE +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/WHEEL +0 -0
- {lionagi-0.0.114.dist-info → lionagi-0.0.116.dist-info}/top_level.txt +0 -0
lionagi/__init__.py
CHANGED
@@ -17,13 +17,16 @@ Copyright 2023 HaiyangLi <ocean@lionagi.ai>
 import logging
 from .version import __version__
 
+
 from .utils import *
 from .schema import *
-from .
-from .
+from .structures import *
+from .loaders import *
+from .messages import *
 from .objs import *
-
-
+from .tools import *
+from .core import *
+
 
 
 logger = logging.getLogger(__name__)
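With the 0.0.116 `__init__`, the new subpackages are wildcard-re-exported at the top level. A minimal sketch of what the flattened namespace makes reachable, assuming the names exported by `lionagi.core` in the diff below (`Conversation`, `Session`, `run_session`); purely illustrative:

    import lionagi

    conv = lionagi.Conversation()    # re-exported via `from .core import *`
    session = lionagi.Session(system="You are a helpful assistant")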
lionagi/bridge/__init__.py
CHANGED
@@ -1,7 +1,22 @@
-from .langchain import
-
+from .langchain import(
+    from_langchain, to_langchain_document, langchain_loader,
+    langchain_loader, langchain_text_splitter
+)
+
+from .llama_index import (
+    from_llama_index, to_llama_index_textnode, get_llama_reader,
+    llama_index_reader, get_llama_parser, llama_index_node_parser
+)
 
 __all__ = [
-
-
+    'from_langchain',
+    'to_langchain_document',
+    'langchain_loader',
+    'from_llama_index',
+    'to_llama_index_textnode',
+    'get_llama_reader',
+    'llama_index_reader',
+    'get_llama_parser',
+    'llama_index_node_parser',
+    'langchain_text_splitter'
 ]
lionagi/bridge/langchain.py
CHANGED
@@ -1,7 +1,8 @@
-from typing import Union, Callable, List, Dict, Any
-from
-from
+from typing import Union, Callable, List, Dict, Any, TypeVar
+from lionagi.schema.data_node import DataNode
+from lionagi.utils.sys_util import change_dict_key
 
+T = TypeVar('T', bound='DataNode')
 
 def from_langchain(lc_doc: Any) -> T:
     """
@@ -79,6 +80,25 @@ def langchain_text_splitter(data: Union[str, List],
                             splitter_args: List[Any] = [],
                             splitter_kwargs: Dict[str, Any] = {}) -> List[str]:
 
+    """
+    Splits text or a list of documents using a specified langchain text splitter.
+
+    Parameters:
+        data (Union[str, List]): The input text or list of documents to be split.
+
+        splitter (Union[str, Callable]): The name of the text splitter function or the function itself.
+
+        splitter_args (List[Any]): Positional arguments to pass to the splitter function.
+
+        splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter function.
+
+    Returns:
+        List[str]: A list of chunks obtained by splitting the input.
+
+    Raises:
+        ValueError: If the specified text splitter is invalid or if the splitting fails.
+    """
+
     import langchain.text_splitter as text_splitter
 
     try:
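The new docstring above spells out `langchain_text_splitter`'s contract. A hedged usage sketch, assuming the string form of `splitter` is resolved against the `langchain.text_splitter` module imported in the hunk (the chunking parameters here are illustrative, not defaults):

    from lionagi.bridge.langchain import langchain_text_splitter

    chunks = langchain_text_splitter(
        data="some long document text ...",
        splitter="RecursiveCharacterTextSplitter",   # a real langchain splitter class
        splitter_kwargs={"chunk_size": 100, "chunk_overlap": 20},
    )
    # chunks is a List[str]; ValueError is raised if the splitter is invalid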
lionagi/bridge/llama_index.py
CHANGED
@@ -1,7 +1,8 @@
-from typing import Union, Callable, List, Any, Dict
-from
-from
+from typing import Union, Callable, List, Any, Dict, TypeVar
+from lionagi.schema.data_node import DataNode
+from lionagi.utils.sys_util import change_dict_key
 
+T = TypeVar('T', bound='DataNode')
 
 def from_llama_index(llama_node: Any, **kwargs: Any) -> T:
     """
@@ -36,6 +37,7 @@ def to_llama_index_textnode(datanode: T, **kwargs: Any) -> Any:
     dnode = datanode.to_dict()
     change_dict_key(dnode, old_key='content', new_key='text')
     change_dict_key(dnode, old_key='node_id', new_key='id_')
+    dnode['text'] = str(dnode['text'])
 
     dnode = {**dnode, **kwargs}
     return TextNode.from_dict(dnode)
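The added `dnode['text'] = str(dnode['text'])` hardens the DataNode-to-TextNode conversion: after the key renames (`content` to `text`, `node_id` to `id_`), non-string content no longer breaks `TextNode.from_dict`. A small sketch, assuming `DataNode` accepts a `content` field and llama_index is installed:

    from lionagi.schema.data_node import DataNode
    from lionagi.bridge.llama_index import to_llama_index_textnode

    node = DataNode(content={"summary": "not a string"})   # illustrative content
    text_node = to_llama_index_textnode(node)              # 'text' is coerced to str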
lionagi/configs/__init__.py
CHANGED
lionagi/configs/oai_configs.py
CHANGED
@@ -1,3 +1,6 @@
+# Default configs for the OpenAI API
+
+# ChatCompletion
 oai_chat_llmconfig = {
     "model": "gpt-4-1106-preview",
     "frequency_penalty": 0,
@@ -22,6 +25,8 @@ oai_chat_schema = {
     "config": oai_chat_llmconfig
 }
 
+
+# Finetune
 oai_finetune_llmconfig = {
     "model": "gpt-3.5-turbo",
     "hyperparameters": {
@@ -41,9 +46,91 @@ oai_finetune_schema = {
 }
 
 
+
+# Embeddings
+
+
+
+
+
+
+
+
+
+
+# Audio ---- create speech
+
+oai_audio_speech_llmconfig = {
+    "model": "tts-1",
+    "voice": "alloy",
+    "response_format": "mp3",
+    "speed": 1
+}
+oai_audio_speech_schema = {
+    "required" : ["model", "voice"],
+    "optional": ["response_format", "speed"],
+    "input": "input",
+    "config": oai_audio_speech_llmconfig
+}
+
+
+# Audio ----------- create transcription
+oai_audio_transcriptions_llmconfig = {
+    "model": "whisper-1",
+    "language": None,
+    "prompt": None,
+    "response_format": "json",
+    "temperature": 0
+}
+oai_audio_transcriptions_schema = {
+    "required" : ["model", "voice"],
+    "optional": ["response_format", "language", "prompt", "response_format", "temperature"],
+    "input": "file",
+    "config": oai_audio_transcriptions_llmconfig
+}
+
+
+# Audio ------------ translations
+oai_audio_translations_llmconfig = {
+    "model": "whisper-1",
+    "prompt": None,
+    "response_format": "json",
+    "temperature": 0
+}
+
+oai_audio_translations_schema = {
+    "required" : ["model"],
+    "optional": ["response_format", "speed", "prompt", "temperature"],
+    "input": "file",
+    "config": oai_audio_translations_llmconfig
+}
+
+
+
+
+
+
+
+
+
+# images
+
+
+
+
+
+
+
+
+
+
+
+
 oai_schema = {
 
     "chat": oai_chat_schema,
-    "finetune": oai_finetune_schema
+    "finetune": oai_finetune_schema,
+    "audio_speech": oai_audio_speech_schema,
+    "audio_transcriptions": oai_audio_transcriptions_schema,
+    "audio_translations": oai_audio_translations_schema,
 
 }
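Each `oai_schema` entry pairs a default config with the keys a payload builder should send: `required` keys are always copied, `optional` keys only when truthy, and `input` names the field carrying the request data. A minimal sketch of how such a schema is consumed, mirroring the commented-out `create_payload` in `lionagi/endpoints/audio.py` further down (the override value is illustrative):

    from lionagi.configs.oai_configs import oai_schema

    schema = oai_schema["audio_speech"]
    config = {**schema["config"], "speed": 1.25}    # override an optional key

    payload = {schema["input"]: "Hello, world"}
    for key in schema["required"]:
        payload[key] = config[key]
    for key in schema["optional"]:
        if bool(config[key]) and str(config[key]).lower() != "none":
            payload[key] = config[key]
    # -> {"input": "Hello, world", "model": "tts-1", "voice": "alloy",
    #     "response_format": "mp3", "speed": 1.25}

Note that `oai_audio_transcriptions_schema` lists "voice" as required while its own config defines no "voice" key, so this loop would raise a KeyError for that entry as shipped.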
lionagi/core/__init__.py
CHANGED
@@ -1,15 +1,12 @@
-# from .
+# from .instruction_set import InstructionSet
 from .conversations import Conversation
 from .sessions import Session
+from .flows import run_session #, Flow
 
-# from .instruction_sets import InstructionSet
-# from .flows.flow import Flow
 
-
-__all__ = [
-    # "Response",
-    # "Instruction",
-    # "System",
+__all__ = [
     "Conversation",
-    "Session",
+    "Session",
+    "run_session",
+    # "Flow"
 ]
lionagi/core/conversations/conversation.py
ADDED
@@ -0,0 +1,107 @@
+from typing import List, Any
+
+from lionagi.schema.base_node import BaseNode
+from lionagi.messages import Message, Response
+from lionagi.objs.messenger import Messenger
+
+
+class Conversation(BaseNode):
+    """
+    A conversation that handles messages and responses.
+
+    Attributes:
+        response_counts (int): A counter for the number of responses in the conversation.
+        messages (List[Message]): A list of message objects in the conversation.
+        msgr (Messenger): An instance of Messenger to create message objects.
+        responses (List[Response]): A list of response objects in the conversation.
+    """
+
+    response_counts : int = 0
+    messages: List[Message] = []
+    msgr : Any = Messenger()
+    responses: List[Response] = []
+
+    def initiate_conversation(
+        self, system=None, instruction=None,
+        context=None, name=None
+    ):
+        """
+        Initiates a new conversation, erasing any previous messages and responses.
+
+        Parameters:
+            system (Any, optional): System information to include in the initial message. Defaults to None.
+            instruction (Any, optional): Instruction details to include in the conversation. Defaults to None.
+            context (Any, optional): Contextual information relevant to the conversation. Defaults to None.
+            name (str, optional): The name associated with the conversation. Defaults to None.
+
+        Returns:
+            None
+        """
+        self.messages, self.responses = [], []
+        self.add_messages(system=system)
+        self.add_messages(instruction=instruction, context=context, name=name)
+
+    # modify the message adding to accommodate tools
+    def add_messages(
+        self, system=None, instruction=None,
+        context=None, response=None, name=None
+    ):
+        """
+        Adds a new message object to the conversation messages list based on the provided parameters.
+
+        Parameters:
+            system (Any, optional): System information to include in the message. Defaults to None.
+            instruction (Any, optional): Instruction details to include in the message. Defaults to None.
+            context (Any, optional): Contextual information relevant to the message. Defaults to None.
+            response (Any, optional): Response details to include in the message. Defaults to None.
+            name (str, optional): The name associated with the message. Defaults to None.
+
+        Returns:
+            None
+        """
+        msg = self.msgr.create_message(
+            system=system, instruction=instruction,
+            context=context, response=response, name=name
+        )
+        self.messages.append(msg)
+
+    def change_system(self, system):
+        """
+        Changes the system information of the first message in the conversation.
+
+        Parameters:
+            system (Any): The new system information to be set.
+
+        Returns:
+            None
+        """
+        self.messages[0] = self.msgr.create_message(system=system)
+
+
+    def keep_last_n_exchanges(self, n: int):
+        """
+        Keeps only the last n exchanges in the conversation, trimming it to the specified number of most recent exchanges.
+        An exchange is defined as a sequence of messages starting with a user message.
+        The first message in the conversation, typically a system message, is always retained.
+
+        Parameters:
+            n (int): The number of exchanges to keep in the conversation.
+
+        Returns:
+            None: The method modifies the conversation in place and does not return a value.
+
+        Raises:
+            ValueError: If n is not a positive integer.
+
+        Note:
+            This function assumes the first message in the conversation is a system message and each user message
+            marks the beginning of a new exchange.
+        """
+        response_indices = [
+            index for index, message in enumerate(self.messages[1:])
+            if message.role == "user"
+        ]
+        if len(response_indices) >= n:
+            first_index_to_keep = response_indices[-n] + 1
+            self.messages = [self.system] + self.messages[first_index_to_keep:]
+
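A short usage sketch of the new `Conversation` node, using only the methods defined above (the message text is illustrative; `Messenger` does the actual message construction):

    from lionagi.core.conversations.conversation import Conversation

    conv = Conversation()
    conv.initiate_conversation(
        system="You are a helpful assistant",
        instruction="Summarize the release notes",
    )
    conv.add_messages(instruction="Now list the breaking changes")
    conv.keep_last_n_exchanges(1)   # keeps the system message plus the last user exchange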
lionagi/core/flows/flow_util.py
ADDED
@@ -0,0 +1,62 @@
+from ..sessions import Session
+
+def get_config(temperature, max_tokens, key_scheme, n):
+    f = lambda i: {
+        "temperature": temperature[i],
+        "max_tokens": max_tokens[i],
+    }
+    return {
+        "key": f"{key_scheme}{n+1}",
+        "config": f(n)
+    }
+
+async def run_workflow(
+    session, prompts, temperature, max_tokens,
+    key_scheme, num_prompts, context
+):
+    for i in range(num_prompts):
+        key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
+        if i == 0:
+            await session.initiate(instruction=prompts[key_], context=context, **config_)
+        else:
+            await session.followup(instruction=prompts[key_], **config_)
+
+    return session
+
+async def run_auto_workflow(
+    session, prompts, temperature, max_tokens,
+    key_scheme, num_prompts, context
+):
+    for i in range(num_prompts):
+        key_, config_ = get_config(temperature, max_tokens, key_scheme, i)
+        if i == 0:
+            await session.initiate(instruction=prompts[key_], context=context, **config_)
+        else:
+            await session.auto_followup(instruction=prompts[key_], **config_)
+
+    return session
+
+async def run_session(
+    prompts, dir, llmconfig, key_scheme, num_prompts,
+    temperature, max_tokens, type_=None, tools=None
+):
+    prompts_ = prompts.copy()
+    session = Session(
+        system=prompts_.pop('system', 'You are a helpful assistant'),
+        dir = dir,
+        llmconfig = llmconfig
+    )
+    if tools:
+        session.register_tools(tools)
+    if type_ is None:
+        session = await run_workflow(
+            session, prompts_, temperature, max_tokens,
+            key_scheme=key_scheme, num_prompts=num_prompts
+        )
+    elif type_ == 'auto':
+        session = await run_auto_workflow(
+            session, prompts_, temperature, max_tokens,
+            key_scheme=key_scheme, num_prompts=num_prompts
+        )
+
+    return session
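`get_config` zips per-step sampling settings with a `key_scheme` naming convention, which is what the `prompts` dict is keyed by. A quick illustration with arbitrary values (note that `run_session`, as shipped, does not forward `context` to `run_workflow` even though `run_workflow` requires it positionally, so exercising the full flow would need that patched):

    from lionagi.core.flows.flow_util import get_config

    temperature = [0.7, 0.2]
    max_tokens = [512, 1024]

    print(get_config(temperature, max_tokens, key_scheme="step", n=0))
    # -> {'key': 'step1', 'config': {'temperature': 0.7, 'max_tokens': 512}}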
lionagi/core/sessions/sessions.py
ADDED
@@ -0,0 +1,187 @@
+import json
+from typing import Any
+from dotenv import load_dotenv
+
+from lionagi.schema import DataLogger, Tool
+from lionagi.utils import lcall, alcall
+from lionagi.services import OpenAIService
+from lionagi.endpoints import ChatCompletion
+from lionagi.objs.tool_manager import ToolManager
+from lionagi.configs.oai_configs import oai_schema
+from lionagi.core.conversations.conversation import Conversation
+
+load_dotenv()
+OAIService = OpenAIService()
+
+
+class Session:
+
+    def __init__(
+        self, system, dir=None, llmconfig=oai_schema['chat']['config'],
+        service=OAIService
+    ):
+        self.conversation = Conversation()
+        self.system = system
+        self.llmconfig = llmconfig
+        self.logger_ = DataLogger(dir=dir)
+        self.service = service
+        self.tool_manager = ToolManager()
+
+    def set_dir(self, dir):
+        self.logger_.dir = dir
+
+    def set_system(self, system):
+        self.conversation.change_system(system)
+
+    def set_llmconfig(self, llmconfig):
+        self.llmconfig = llmconfig
+
+    def set_service(self, service):
+        self.service = service
+
+    async def _output(self, invoke=True, out=True):
+        if invoke:
+            try:
+                # func, args = self.tool_manager._get_function_call(self.conversation.responses[-1]['content'])
+                # outs = await self.tool_manager.invoke(func, args)
+                # self.conversation.add_messages(response=outs)
+
+                tool_uses = json.loads(self.conversation.responses[-1].message_content)
+                if 'function_list' in tool_uses.keys():
+                    func_calls = lcall(tool_uses['function_list'], self.tool_manager.get_function_call)
+                else:
+                    func_calls = lcall(tool_uses['tool_uses'], self.tool_manager.get_function_call)
+
+                outs = await alcall(func_calls, self.tool_manager.invoke)
+                for out, f in zip(outs, func_calls):
+                    response = {"function": f[0], "arguments": f[1], "output": out}
+                    self.conversation.add_messages(response=response)
+            except:
+                pass
+        if out:
+            return self.conversation.responses[-1].message_content
+
+    def _is_invoked(self):
+        content = self.conversation.messages[-1].message_content
+        try:
+            if json.loads(content).keys() >= {'function', 'arguments', 'output'}:
+                return True
+        except:
+            return False
+
+    def register_tools(self, tools):  #, update=False, new=False, prefix=None, postfix=None):
+        if not isinstance(tools, list):
+            tools = [tools]
+        self.tool_manager.register_tools(tools=tools)  #, update=update, new=new, prefix=prefix, postfix=postfix)
+        # tools_schema = lcall(tools, lambda tool: tool.to_dict()['schema_'])
+        # if self.llmconfig['tools'] is None:
+        #     self.llmconfig['tools'] = tools_schema
+        # else:
+        #     self.llmconfig['tools'] += tools_schema
+
+    def _tool_parser(self, **kwargs):
+        # 1. single schema: dict
+        # 2. tool: Tool
+        # 3. name: str
+        # 4. list: 3 types of lists
+        def tool_check(tool):
+            if isinstance(tool, dict):
+                return tool
+            elif isinstance(tool, Tool):
+                return tool.schema_
+            elif isinstance(tool, str):
+                if self.tool_manager.name_existed(tool):
+                    tool = self.tool_manager.registry[tool]
+                    return tool.schema_
+                else:
+                    raise ValueError(f'Function {tool} is not registered.')
+
+        if 'tools' in kwargs:
+            if not isinstance(kwargs['tools'], list):
+                kwargs['tools'] = [kwargs['tools']]
+            kwargs['tools'] = lcall(kwargs['tools'], tool_check)
+
+        else:
+            tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
+            kwargs = {**tool_kwarg, **kwargs}
+
+        return kwargs
+
+    async def initiate(self, instruction, system=None, context=None,
+                       name=None, invoke=True, out=True, **kwargs) -> Any:
+        # if self.tool_manager.registry != {}:
+        #     if 'tools' not in kwargs:
+        #         tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
+        #         kwargs = {**tool_kwarg, **kwargs}
+        if self.tool_manager.registry != {}:
+            kwargs = self._tool_parser(**kwargs)
+        if self.service is not None:
+            await self.service._init()
+        config = {**self.llmconfig, **kwargs}
+        system = system or self.system
+        self.conversation.initiate_conversation(system=system, instruction=instruction, context=context, name=name)
+        await self.call_chatcompletion(**config)
+
+        return await self._output(invoke, out)
+
+    async def followup(self, instruction, system=None, context=None,
+                       out=True, name=None, invoke=True, **kwargs) -> Any:
+        if system:
+            self.conversation.change_system(system)
+        self.conversation.add_messages(instruction=instruction, context=context, name=name)
+
+        if 'tool_parsed' in kwargs:
+            kwargs.pop('tool_parsed')
+        else:
+            if self.tool_manager.registry != {}:
+                kwargs = self._tool_parser(**kwargs)
+        # if self.tool_manager.registry != {}:
+        #     if 'tools' not in kwargs:
+        #         tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
+        #         kwargs = {**tool_kwarg, **kwargs}
+        config = {**self.llmconfig, **kwargs}
+        await self.call_chatcompletion(**config)
+
+        return await self._output(invoke, out)
+
+    async def auto_followup(self, instruct, num=3, **kwargs):
+        # if self.tool_manager.registry != {}:
+        #     if 'tools' not in kwargs:
+        #         tool_kwarg = {"tools": self.tool_manager.to_tool_schema_list()}
+        #         kwargs = {**tool_kwarg, **kwargs}
+        if self.tool_manager.registry != {}:
+            kwargs = self._tool_parser(**kwargs)
+
+        cont_ = True
+        while num > 0 and cont_ is True:
+            await self.followup(instruct, tool_choice="auto", tool_parsed=True, **kwargs)
+            num -= 1
+            cont_ = True if self._is_invoked() else False
+        if num == 0:
+            await self.followup(instruct, **kwargs, tool_parsed=True)
+
+    # def messages_to_csv(self, dir=None, filename="messages.csv", **kwargs):
+    #     dir = dir or self.logger_.dir
+    #     if dir is None:
+    #         raise ValueError("No directory specified.")
+    #     self.conversation.msg.to_csv(dir=dir, filename=filename, **kwargs)
+
+    # def log_to_csv(self, dir=None, filename="llmlog.csv", **kwargs):
+    #     dir = dir or self.logger_.dir
+    #     if dir is None:
+    #         raise ValueError("No directory specified.")
+    #     self.logger_.to_csv(dir=dir, filename=filename, **kwargs)
+
+    async def call_chatcompletion(self, schema=oai_schema['chat'], **kwargs):
+        messages = [message.message for message in self.conversation.messages]
+        payload = ChatCompletion.create_payload(messages=messages, schema=schema, llmconfig=self.llmconfig, **kwargs)
+        completion = await self.service.serve(payload=payload)
+        if "choices" in completion:
+            self.logger_({"input": payload, "output": completion})
+            self.conversation.add_messages(response=completion['choices'][0])
+            self.conversation.responses.append(self.conversation.messages[-1])
+            self.conversation.response_counts += 1
+            self.service.status_tracker.num_tasks_succeeded += 1
+        else:
+            self.service.status_tracker.num_tasks_failed += 1
+
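A minimal sketch of the new `Session` surface, using only the constructor and coroutines defined above. It assumes an `OPENAI_API_KEY` is available in the environment for the default `OAIService`, and the prompt text is illustrative:

    import asyncio
    from lionagi.core.sessions.sessions import Session

    async def main():
        session = Session(system="You are a helpful assistant", dir="data/logs/")
        first = await session.initiate(instruction="Draft a haiku about diffs")
        follow = await session.followup(instruction="Now make it rhyme")
        return first, follow

    asyncio.run(main())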
File without changes
lionagi/endpoints/audio.py
ADDED
@@ -0,0 +1,17 @@
+# from .base_endpoint import BaseEndpoint
+
+
+# class Audio(BaseEndpoint):
+#     endpoint: str = "chat/completions"
+
+#     @classmethod
+#     def create_payload(scls, messages, llmconfig, schema, **kwargs):
+#         config = {**llmconfig, **kwargs}
+#         payload = {"messages": messages}
+#         for key in schema['required']:
+#             payload.update({key: config[key]})
+
+#         for key in schema['optional']:
+#             if bool(config[key]) is True and str(config[key]).lower() != "none":
+#                 payload.update({key: config[key]})
+#         return payload
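The optional-key filter in the commented-out `create_payload` has a subtlety worth noting: `bool(config[key])` drops falsy values, so a `temperature` of 0 or a `language` of None never reaches the payload, and `str(config[key]).lower() != "none"` additionally drops the literal string "None". A standalone restatement of just that filter, using the transcription defaults from oai_configs.py above:

    config = {"language": None, "prompt": None, "response_format": "json", "temperature": 0}

    payload = {}
    for key in ["response_format", "language", "prompt", "temperature"]:
        if bool(config[key]) is True and str(config[key]).lower() != "none":
            payload[key] = config[key]

    print(payload)   # -> {'response_format': 'json'}  (temperature 0 is filtered out, since bool(0) is False)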