lionagi 0.0.112__py3-none-any.whl → 0.0.113__py3-none-any.whl
- lionagi/__init__.py +3 -3
- lionagi/bridge/__init__.py +7 -0
- lionagi/bridge/langchain.py +131 -0
- lionagi/bridge/llama_index.py +157 -0
- lionagi/configs/__init__.py +7 -0
- lionagi/configs/oai_configs.py +49 -0
- lionagi/configs/openrouter_config.py +49 -0
- lionagi/core/__init__.py +8 -2
- lionagi/core/instruction_sets.py +1 -3
- lionagi/core/messages.py +2 -2
- lionagi/core/sessions.py +174 -27
- lionagi/datastore/__init__.py +1 -0
- lionagi/loader/__init__.py +9 -4
- lionagi/loader/chunker.py +157 -0
- lionagi/loader/reader.py +124 -0
- lionagi/objs/__init__.py +7 -0
- lionagi/objs/messenger.py +163 -0
- lionagi/objs/tool_registry.py +247 -0
- lionagi/schema/__init__.py +11 -0
- lionagi/schema/base_schema.py +239 -0
- lionagi/schema/base_tool.py +9 -0
- lionagi/schema/data_logger.py +94 -0
- lionagi/services/__init__.py +14 -0
- lionagi/{service_/oai.py → services/base_api_service.py} +49 -82
- lionagi/{endpoint/base_endpoint.py → services/chatcompletion.py} +19 -22
- lionagi/services/oai.py +34 -0
- lionagi/services/openrouter.py +32 -0
- lionagi/{service_/service_utils.py → services/service_objs.py} +0 -1
- lionagi/structure/__init__.py +7 -0
- lionagi/structure/relationship.py +128 -0
- lionagi/structure/structure.py +160 -0
- lionagi/tests/test_flatten_util.py +426 -0
- lionagi/tools/__init__.py +0 -5
- lionagi/tools/coder.py +1 -0
- lionagi/tools/scorer.py +1 -0
- lionagi/tools/validator.py +1 -0
- lionagi/utils/__init__.py +46 -20
- lionagi/utils/api_util.py +86 -0
- lionagi/utils/call_util.py +347 -0
- lionagi/utils/flat_util.py +540 -0
- lionagi/utils/io_util.py +102 -0
- lionagi/utils/load_utils.py +190 -0
- lionagi/utils/sys_util.py +191 -0
- lionagi/utils/tool_util.py +92 -0
- lionagi/utils/type_util.py +81 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/METADATA +37 -13
- lionagi-0.0.113.dist-info/RECORD +84 -0
- lionagi/endpoint/chat_completion.py +0 -20
- lionagi/endpoint/endpoint_utils.py +0 -0
- lionagi/llm_configs.py +0 -21
- lionagi/loader/load_utils.py +0 -161
- lionagi/schema.py +0 -275
- lionagi/service_/__init__.py +0 -6
- lionagi/service_/base_service.py +0 -48
- lionagi/service_/openrouter.py +0 -1
- lionagi/services.py +0 -1
- lionagi/tools/tool_utils.py +0 -75
- lionagi/utils/sys_utils.py +0 -799
- lionagi-0.0.112.dist-info/RECORD +0 -67
- /lionagi/{core/responses.py → datastore/chroma.py} +0 -0
- /lionagi/{endpoint/assistants.py → datastore/deeplake.py} +0 -0
- /lionagi/{endpoint/audio.py → datastore/elasticsearch.py} +0 -0
- /lionagi/{endpoint/embeddings.py → datastore/lantern.py} +0 -0
- /lionagi/{endpoint/files.py → datastore/pinecone.py} +0 -0
- /lionagi/{endpoint/fine_tuning.py → datastore/postgres.py} +0 -0
- /lionagi/{endpoint/images.py → datastore/qdrant.py} +0 -0
- /lionagi/{endpoint/messages.py → schema/base_condition.py} +0 -0
- /lionagi/{service_ → services}/anthropic.py +0 -0
- /lionagi/{service_ → services}/anyscale.py +0 -0
- /lionagi/{service_ → services}/azure.py +0 -0
- /lionagi/{service_ → services}/bedrock.py +0 -0
- /lionagi/{service_ → services}/everlyai.py +0 -0
- /lionagi/{service_ → services}/gemini.py +0 -0
- /lionagi/{service_ → services}/gpt4all.py +0 -0
- /lionagi/{service_ → services}/huggingface.py +0 -0
- /lionagi/{service_ → services}/litellm.py +0 -0
- /lionagi/{service_ → services}/localai.py +0 -0
- /lionagi/{service_ → services}/mistralai.py +0 -0
- /lionagi/{service_ → services}/ollama.py +0 -0
- /lionagi/{service_ → services}/openllm.py +0 -0
- /lionagi/{service_ → services}/perplexity.py +0 -0
- /lionagi/{service_ → services}/predibase.py +0 -0
- /lionagi/{service_ → services}/rungpt.py +0 -0
- /lionagi/{service_ → services}/vllm.py +0 -0
- /lionagi/{service_ → services}/xinference.py +0 -0
- /lionagi/{endpoint → tests}/__init__.py +0 -0
- /lionagi/{endpoint/models.py → tools/planner.py} +0 -0
- /lionagi/{endpoint/moderations.py → tools/prompter.py} +0 -0
- /lionagi/{endpoint/runs.py → tools/sandbox.py} +0 -0
- /lionagi/{endpoint/threads.py → tools/summarizer.py} +0 -0
- {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/LICENSE +0 -0
- {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/WHEEL +0 -0
- {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/top_level.txt +0 -0
lionagi/utils/load_utils.py
ADDED
@@ -0,0 +1,190 @@
+import math
+from pathlib import Path
+from typing import List, Union, Dict, Any, Tuple
+
+from .type_util import to_list
+from .call_util import lcall
+from .io_util import to_csv
+from ..schema.base_schema import DataNode
+
+
+def dir_to_path(
+    dir: str, ext: str, recursive: bool = False,
+    flatten: bool = True) -> List[Path]:
+    """
+    Generates a list of file paths from a directory with the given file extension.
+
+    Args:
+        directory (str): The directory to search for files.
+        extension (str): The file extension to filter by.
+        recursive (bool): Whether to search subdirectories recursively. Defaults to False.
+        flatten (bool): Whether to flatten the list. Defaults to True.
+
+    Returns:
+        List[Path]: A list of Paths to the files.
+
+    Raises:
+        ValueError: If the directory or extension is invalid.
+    """
+
+    def _dir_to_path(ext):
+        tem = '**/*' if recursive else '*'
+        return list(Path(dir).glob(tem + ext))
+
+    try:
+        return to_list(lcall(ext, _dir_to_path, flatten=True), flatten=flatten)
+    except:
+        raise ValueError("Invalid directory or extension, please check the path")
+
+def dir_to_nodes(dir: str, ext, recursive: bool = False, flatten: bool = True, clean_text: bool = True):
+    path_list = dir_to_path(dir, ext, recursive, flatten)
+    files_info = lcall(path_list, read_text, clean=clean_text)
+    nodes = lcall(files_info, lambda x: DataNode(content=x[0], metadata=x[1]))
+    return nodes
+
+def chunk_text(input: str,
+               chunk_size: int,
+               overlap: float,
+               threshold: int) -> List[Union[str, None]]:
+    """
+    Chunks the input text into smaller parts, with optional overlap and threshold for final chunk.
+
+    Args:
+        text (str): The input text to chunk.
+        chunk_size (int): The size of each chunk.
+        overlap (float): The amount of overlap between chunks.
+        threshold (int): The minimum size of the final chunk.
+
+    Returns:
+        List[Union[str, None]]: A list of text chunks.
+
+    Raises:
+        ValueError: If an error occurs during chunking.
+    """
+
+    def _chunk_n1():
+        return [input]
+
+    def _chunk_n2():
+        chunks = []
+        chunks.append(input[:chunk_size + overlap_size])
+
+        if len(input) - chunk_size > threshold:
+            chunks.append(input[chunk_size - overlap_size:])
+        else:
+            return _chunk_n1()
+
+        return chunks
+
+    def _chunk_n3():
+        chunks = []
+        chunks.append(input[:chunk_size + overlap_size])
+        for i in range(1, n_chunks - 1):
+            start_idx = chunk_size * i - overlap_size
+            end_idx = chunk_size * (i + 1) + overlap_size
+            chunks.append(input[start_idx:end_idx])
+
+        if len(input) - chunk_size * (n_chunks - 1) > threshold:
+            chunks.append(input[chunk_size * (n_chunks - 1) - overlap_size:])
+        else:
+            chunks[-1] += input[chunk_size * (n_chunks - 1) + overlap_size:]
+
+        return chunks
+
+    try:
+        if not isinstance(input, str): input = str(input)
+
+        n_chunks = math.ceil(len(input) / chunk_size)
+        overlap_size = int(overlap / 2)
+
+        if n_chunks == 1:
+            return _chunk_n1()
+
+        elif n_chunks == 2:
+            return _chunk_n2()
+
+        elif n_chunks > 2:
+            return _chunk_n3()
+
+    except Exception as e:
+        raise ValueError(f"An error occurred while chunking the text. {e}")
+
+def read_text(filepath: str, clean: bool = True) -> Tuple[str, dict]:
+    """
+    Reads text from a file and optionally cleans it, returning the content and metadata.
+
+    Args:
+        filepath (str): The path to the file to read.
+        clean (bool): Whether to clean the text by replacing certain characters. Defaults to True.
+
+    Returns:
+        Tuple[str, dict]: A tuple containing the content and metadata of the file.
+
+    Raises:
+        FileNotFoundError: If the file cannot be found.
+        PermissionError: If there are permissions issues.
+        OSError: For other OS-related errors.
+    """
+    def _get_metadata():
+        import os
+        from datetime import datetime
+        file = filepath
+        size = os.path.getsize(filepath)
+        creation_date = datetime.fromtimestamp(os.path.getctime(filepath)).date()
+        modified_date = datetime.fromtimestamp(os.path.getmtime(filepath)).date()
+        last_accessed_date = datetime.fromtimestamp(os.path.getatime(filepath)).date()
+        return {'file': str(file),
+                'size': size,
+                'creation_date': str(creation_date),
+                'modified_date': str(modified_date),
+                'last_accessed_date': str(last_accessed_date)}
+    try:
+        with open(filepath, 'r') as f:
+            content = f.read()
+            if clean:
+                # Define characters to replace and their replacements
+                replacements = {'\\': ' ', '\n': ' ', '\t': ' ', '  ': ' ', '\'': ' '}
+                for old, new in replacements.items():
+                    content = content.replace(old, new)
+            metadata = _get_metadata()
+            return content, metadata
+    except Exception as e:
+        raise e
+
+def _file_to_chunks(input: Dict[str, Any],
+                    field: str = 'content',
+                    chunk_size: int = 1500,
+                    overlap: float = 0.1,
+                    threshold: int = 200) -> List[Dict[str, Any]]:
+    try:
+        out = {key: value for key, value in input.items() if key != field}
+        out.update({"chunk_overlap": overlap, "chunk_threshold": threshold})
+
+        chunks = chunk_text(input[field], chunk_size=chunk_size, overlap=overlap, threshold=threshold)
+        logs = []
+        for i, chunk in enumerate(chunks):
+            chunk_dict = out.copy()
+            chunk_dict.update({
+                'file_chunks': len(chunks),
+                'chunk_id': i + 1,
+                'chunk_size': len(chunk),
+                f'chunk_{field}': chunk
+            })
+            logs.append(chunk_dict)
+
+        return logs
+
+    except Exception as e:
+        raise ValueError(f"An error occurred while chunking the file. {e}")
+
+def file_to_chunks(input,
+                   # project='project',
+                   # output_dir='data/logs/sources/',
+                   chunk_func = _file_to_chunks, **kwargs):
+    # out_to_csv=False,
+    # filename=None,
+    # verbose=True,
+    # timestamp=True,
+    # logger=None,
+    logs = to_list(lcall(input, chunk_func, **kwargs), flatten=True)
+    return logs
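For orientation, a minimal usage sketch of the loader utilities added above (illustrative only; the directory, text, and argument values are placeholders, not taken from the package):

```python
from lionagi.utils.load_utils import chunk_text, dir_to_path

# Collect every .py file under ./lionagi, including subdirectories.
paths = dir_to_path("./lionagi", ".py", recursive=True)

# Split a long string into ~1500-character chunks; `overlap` is a character
# count that chunk_text halves and applies on each side of a chunk boundary.
chunks = chunk_text("some long document text ... " * 200,
                    chunk_size=1500, overlap=100, threshold=200)
print(len(paths), len(chunks))
```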
lionagi/utils/sys_util.py
ADDED
@@ -0,0 +1,191 @@
+"""
+Copyright 2023 HaiyangLi <ocean@lionagi.ai>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+import copy
+import hashlib
+from pathlib import Path
+from datetime import datetime
+from typing import Any, Generator, List
+
+def create_copy(input: Any, n: int) -> Any:
+    """
+    Creates a deep copy of the input object a specified number of times.
+
+    This function makes deep copies of the provided input. If the number of copies ('n')
+    is greater than 1, a list of deep copies is returned. For a single copy, it returns
+    the copy directly.
+
+    Parameters:
+        input (Any): The object to be copied.
+
+        n (int): The number of deep copies to create.
+
+    Raises:
+        ValueError: If 'n' is not a positive integer.
+
+    Returns:
+        Any: A deep copy of 'input' or a list of deep copies if 'n' > 1.
+
+    Example:
+        >>> sample_dict = {'key': 'value'}
+        >>> make_copy(sample_dict, 2)
+        [{'key': 'value'}, {'key': 'value'}]
+    """
+    if not isinstance(n, int) or n < 1:
+        raise ValueError(f"'n' must be a positive integer: {n}")
+    return copy.deepcopy(input) if n == 1 else [copy.deepcopy(input) for _ in range(n)]
+
+def create_id(n=32) -> str:
+    """
+    Generates a unique ID based on the current time and random bytes.
+
+    This function combines the current time in ISO 8601 format with 16 random bytes
+    to create a unique identifier. The result is hashed using SHA-256 and the first
+    16 characters of the hexadecimal digest are returned.
+
+    Returns:
+        str: A 16-character unique identifier.
+
+    Example:
+        >>> create_id() # Doctest: +ELLIPSIS
+        '...'
+    """
+    current_time = datetime.now().isoformat().encode('utf-8')
+    random_bytes = os.urandom(2048)
+    return hashlib.sha256(current_time + random_bytes).hexdigest()[:n]
+
+def get_timestamp() -> str:
+    """
+    Generates a current timestamp in a file-safe string format.
+
+    This function creates a timestamp from the current time, formatted in ISO 8601 format,
+    and replaces characters that are typically problematic in filenames (like colons and periods)
+    with underscores.
+
+    Returns:
+        str: The current timestamp in a file-safe string format.
+
+    Example:
+        >>> get_timestamp() # Doctest: +ELLIPSIS
+        '...'
+    """
+    return datetime.now().isoformat().replace(":", "_").replace(".", "_")
+
+def create_path(dir: str, filename: str, timestamp: bool = True, dir_exist_ok: bool = True, time_prefix=False) -> str:
+    """
+    Creates a file path by optionally appending a timestamp to the filename.
+
+    This function constructs a file path by combining a directory, an optional timestamp,
+    and a filename. It also ensures the existence of the directory.
+
+    Parameters:
+        dir (str): The directory in which the file is to be located.
+
+        filename (str): The name of the file.
+
+        timestamp (bool, optional): If True, appends a timestamp to the filename. Defaults to True.
+
+        dir_exist_ok (bool, optional): If True, creates the directory if it doesn't exist. Defaults to True.
+
+        time_prefix (bool, optional): If True, the timestamp is added as a prefix; otherwise, it's appended. Defaults to False.
+
+    Returns:
+        str: The full path to the file.
+
+    Example:
+        >>> create_path('/tmp/', 'log.txt', timestamp=False)
+        '/tmp/log.txt'
+    """
+
+    dir = dir + '/' if str(dir)[-1] != '/' else dir
+    filename, ext = filename.split('.')
+    os.makedirs(dir, exist_ok=dir_exist_ok)
+
+    if timestamp:
+        timestamp = get_timestamp()
+        return f"{dir}{timestamp}_{filename}.{ext}" if time_prefix else f"{dir}{filename}_{timestamp}.{ext}"
+    else:
+        return f"{dir}{filename}"
+
+def split_path(path: Path) -> tuple:
+    folder_name = path.parent.name
+    file_name = path.name
+    return (folder_name, file_name)
+
+def get_bins(input: List[str], upper: int = 7500) -> List[List[int]]:
+    """
+    Get index of elements in a list based on their consecutive cumulative sum of length,
+    according to some upper threshold. Return lists of indices as bins.
+
+    Parameters:
+        input (List[str]): List of items to be binned.
+
+        upper (int, optional): Upper threshold for the cumulative sum of the length of items in a bin. Default is 7500.
+
+    Returns:
+        List[List[int]]: List of lists, where each inner list contains the indices of the items that form a bin.
+
+    Example:
+        >>> items = ['apple', 'a', 'b', 'banana', 'cheery', 'c', 'd', 'e']
+        >>> upper = 10
+        >>> get_bins(items, upper)
+        [[0, 1, 2], [3], [4, 5, 6, 7]]
+    """
+    current = 0
+    bins = []
+    bin = []
+
+    for idx, item in enumerate(input):
+
+        if current + len(item) < upper:
+            bin.append(idx)
+            current += len(item)
+
+        elif current + len(item) >= upper:
+            bins.append(bin)
+            bin = [idx]
+            current = len(item)
+
+        if idx == len(input) - 1 and len(bin) > 0:
+            bins.append(bin)
+
+    return bins
+
+def task_id_generator() -> Generator[int, None, None]:
+    """
+    A generator function that yields a sequential series of task IDs.
+
+    Yields:
+        int: The next task ID in the sequence, starting from 0.
+
+    Examples:
+        task_id_gen = task_id_generator()
+        next(task_id_gen) # Yields 0
+        next(task_id_gen) # Yields 1
+    """
+    task_id = 0
+    while True:
+        yield task_id
+        task_id += 1
+
+def change_dict_key(dict_, old_key, new_key):
+    dict_[new_key] = dict_.pop(old_key)
+
+# def parse_function_call(response: str) -> Tuple[str, Dict]:
+#     out = json.loads(response)
+#     func = out.get('function', '').lstrip('call_')
+#     args = json.loads(out.get('arguments', '{}'))
+#     return func, args
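A short, illustrative sketch of the new `sys_util` helpers (the directory, filename, and item list below are placeholders; the `get_bins` result mirrors the docstring example):

```python
from lionagi.utils.sys_util import create_id, create_path, get_bins

print(create_id())                          # 32-character hex id by default
print(create_path('data/logs', 'run.txt'))  # e.g. 'data/logs/run_<timestamp>.txt'
# note: create_path also creates the directory if it does not exist

items = ['apple', 'a', 'b', 'banana', 'cheery', 'c', 'd', 'e']
print(get_bins(items, upper=10))            # [[0, 1, 2], [3], [4, 5, 6, 7]]
```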
lionagi/utils/tool_util.py
ADDED
@@ -0,0 +1,92 @@
+import inspect
+from ..schema.base_tool import Tool
+
+
+def extract_docstring_details(func):
+    """
+    Extracts detailed descriptions for each parameter and the function from the docstring.
+
+    Args:
+    - func (function): The function to extract details from.
+
+    Returns:
+    - Tuple[str, dict]: Function description and a dictionary of parameter descriptions.
+    """
+    docstring = inspect.getdoc(func)
+    if not docstring:
+        return "No description available.", {}
+
+    # Splitting the docstring into lines
+    lines = docstring.split('\n')
+
+    # Extracting the function description
+    func_description = lines[0].strip()
+
+    # Extracting parameter descriptions
+    param_descriptions = {}
+    current_param = None
+    for line in lines[1:]:
+        line = line.strip()
+        if line.startswith(':param'):
+            _, param, desc = line.split(' ', 2)
+            current_param = param.strip(':')
+            param_descriptions[current_param] = desc
+        elif current_param and line:
+            # Continue the description of the current parameter
+            param_descriptions[current_param] += ' ' + line
+
+    return func_description, param_descriptions
+
+def func_to_schema(func):
+    """
+    Generates a schema description for a given function, using typing hints and docstrings.
+    The schema includes the function's name, description, and parameters.
+
+    Args:
+    - func (function): The function to generate a schema for.
+
+    Returns:
+    - dict: A schema describing the function.
+    """
+    # Extracting function name and docstring details
+    func_name = func.__name__
+    func_description, param_descriptions = extract_docstring_details(func)
+
+    # Extracting parameters with typing hints
+    sig = inspect.signature(func)
+    parameters = {
+        "type": "object",
+        "properties": {},
+        "required": [],
+    }
+
+    for name, param in sig.parameters.items():
+        # Default type to string and update if type hint is available
+        param_type = "string"
+        if param.annotation is not inspect.Parameter.empty:
+            param_type = param.annotation.__name__
+
+        # Extract parameter description from docstring, if available
+        param_description = param_descriptions.get(name, "No description available.")
+
+        # Assuming all parameters are required for simplicity
+        parameters["required"].append(name)
+        parameters["properties"][name] = {
+            "type": param_type,
+            "description": param_description,
+        }
+
+    # Constructing the schema
+    schema = {
+        "type": "function",
+        "function": {
+            "name": func_name,
+            "description": func_description,
+            "parameters": parameters,
+        }
+    }
+    return schema
+
+def func_to_tool(func_, schema, parser=None):
+    # schema = func_to_schema(func_)
+    return Tool(func=func_, parser=parser, schema_=schema)
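An illustrative sketch of how `func_to_schema` combines type hints with `:param` docstring lines (the `add` function is a made-up example, not part of the package):

```python
from lionagi.utils.tool_util import func_to_schema

def add(x: int, y: int) -> int:
    """
    Adds two numbers together.

    :param x: The first addend.
    :param y: The second addend.
    """
    return x + y

schema = func_to_schema(add)
# schema["function"]["name"]        -> 'add'
# schema["function"]["description"] -> 'Adds two numbers together.'
# schema["function"]["parameters"]["properties"]["x"]
#                                   -> {'type': 'int', 'description': 'The first addend.'}
```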
lionagi/utils/type_util.py
ADDED
@@ -0,0 +1,81 @@
+import re
+from typing import Optional, Union, Iterable, List, Any, Type
+
+from .flat_util import flatten_list
+
+
+def str_to_num(input_: str,
+               upper_bound: Optional[Union[int, float]] = None,
+               lower_bound: Optional[Union[int, float]] = None,
+               num_type: Type[Union[int, float]] = int,
+               precision: Optional[int] = None) -> Union[int, float]:
+    """
+    Converts the first number in the input string to the specified numeric type.
+
+    Args:
+        input_str (str): The input string to extract the number from.
+
+        upper_bound (Optional[Union[int, float]]): The upper bound for the number. Defaults to None.
+
+        lower_bound (Optional[Union[int, float]]): The lower bound for the number. Defaults to None.
+
+        num_type (Type[Union[int, float]]): The type of the number to return (int or float). Defaults to int.
+
+        precision (Optional[int]): The precision for the floating-point number. Defaults to None.
+
+    Returns:
+        Union[int, float]: The converted number.
+
+    Raises:
+        ValueError: If no numeric values are found in the string or if there are conversion errors.
+    """
+    numbers = re.findall(r'-?\d+\.?\d*', input_)
+    if not numbers:
+        raise ValueError(f"No numeric values found in the string: {input_}")
+
+    try:
+        numbers = numbers[0]
+        if num_type is int:
+            numbers = int(float(numbers))
+        elif num_type is float:
+            numbers = round(float(numbers), precision) if precision is not None else float(numbers)
+        else:
+            raise ValueError(f"Invalid number type: {num_type}")
+        if upper_bound is not None and numbers > upper_bound:
+            raise ValueError(f"Number {numbers} is greater than the upper bound of {upper_bound}.")
+        if lower_bound is not None and numbers < lower_bound:
+            raise ValueError(f"Number {numbers} is less than the lower bound of {lower_bound}.")
+        return numbers
+
+    except ValueError as e:
+        raise ValueError(f"Error converting string to number: {e}")
+
+def to_list(input_: Any, flatten: bool = True, dropna: bool = False) -> List[Any]:
+    """
+    Converts the input to a list, optionally flattening it and dropping None values.
+
+    Args:
+        input_item (Any): The input to convert to a list.
+
+        flatten (bool): Whether to flatten the input if it is a nested list. Defaults to True.
+
+        dropna (bool): Whether to drop None values from the list. Defaults to False.
+
+    Returns:
+        List[Any]: The input converted to a list.
+
+    Raises:
+        ValueError: If the input cannot be converted to a list.
+    """
+    if isinstance(input_, list) and flatten:
+        input_ = flatten_list(input_)
+        if dropna:
+            input_ = [i for i in input_ if i is not None]
+    elif isinstance(input_, Iterable) and not isinstance(input_, (str, dict)):
+        try:
+            input_ = list(input_)
+        except:
+            raise ValueError("Input cannot be converted to a list.")
+    else:
+        input_ = [input_]
+    return input_
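And an illustrative sketch of the new `type_util` conversions (the expected results assume `flatten_list` from `flat_util` flattens nested lists, as its name suggests):

```python
from lionagi.utils.type_util import str_to_num, to_list

print(str_to_num("roughly 42 items"))                      # 42
print(str_to_num("3.14159", num_type=float, precision=2))  # 3.14
print(to_list([[1, 2], [3, None]], dropna=True))           # [1, 2, 3]
```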
lionagi/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.0.112"
+__version__ = "0.0.113"
{lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lionagi
-Version: 0.0.112
+Version: 0.0.113
 Summary: Towards automated general intelligence.
 Author: HaiyangLi
 Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -220,7 +220,11 @@ Requires-Dist: python-dotenv ==1.0.0
 Requires-Dist: tiktoken ==0.5.1
 Requires-Dist: httpx ==0.25.1
 
-![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935)
+![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
+
+
+
+
 
 [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Discord](https://discord.gg/7RGWqpSxze)
 
@@ -239,18 +243,20 @@ Install LionAGI with pip:
 ```bash
 pip install lionagi
 ```
-Download the `.env_template` file, input your
+Download the `.env_template` file, input your `API_KEY`, save the file, rename it `.env`, and put it in your project's root directory.
+By default we use `OPENAI_API_KEY`.
 
-### Features
 
-- Robust performance
-- Efficient data operations for reading, chunking, binning, writing, storing and managing data.
-- Fast interaction with LLM services like OpenAI with **configurable rate limiting concurrent API calls** for maximum throughput.
-- Create a production ready LLM application **in hours**. Intuitive workflow management to streamline the process from idea to market.
-- (Work In Progress): verstile intergration with most API and local LLM services.
 
+### Features
+- Create a production-ready LLM application **in hours**, with more than 100 models to choose from
+- Written in pure Python, with minimal dependencies: `aiohttp`, `python-dotenv`, `tiktoken`, `pydantic`
+- Efficient and versatile data operations for reading, chunking, binning, writing and storing data, with built-in support for `langchain` and `llamaindex`
+- Unified interface with any LLM provider, API or local
+- Fast and **concurrent** API calls with a **configurable rate limit**
+- (Work In Progress) support for hundreds of models, both API and local
 ---
-LionAGI is designed to be
+LionAGI is designed to be `asynchronous` only; please check the official Python documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
 
 
 **Notice**:
@@ -271,11 +277,11 @@ import lionagi as li
 system = "You are a helpful assistant designed to perform calculations."
 instruction = {"Addition":"Add the two numbers together i.e. x+y"}
 context = {"x": 10, "y": 5}
+```
 
-
+```python
+# in an interactive environment (.ipynb, for example)
 calculator = li.Session(system=system)
-
-# run a LLM API call
 result = await calculator.initiate(instruction=instruction,
                                    context=context,
                                    model="gpt-4-1106-preview")
@@ -283,6 +289,24 @@ result = await calculator.initiate(instruction=instruction,
 print(f"Calculation Result: {result}")
 ```
 
+```python
+# or, outside an interactive environment, you can use
+import asyncio
+from dotenv import load_dotenv
+
+load_dotenv()
+
+async def main():
+    calculator = li.Session(system=system)
+    result = await calculator.initiate(instruction=instruction,
+                                       context=context,
+                                       model="gpt-4-1106-preview")
+    print(f"Calculation Result: {result}")
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
 Visit our notebooks for our examples.
 
 ### Community