lionagi 0.0.112__tar.gz → 0.0.113__tar.gz
- {lionagi-0.0.112 → lionagi-0.0.113}/PKG-INFO +37 -13
- {lionagi-0.0.112 → lionagi-0.0.113}/README.md +36 -12
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi/__init__.py +3 -3
- lionagi-0.0.113/lionagi/bridge/__init__.py +7 -0
- lionagi-0.0.113/lionagi/bridge/langchain.py +131 -0
- lionagi-0.0.113/lionagi/bridge/llama_index.py +157 -0
- lionagi-0.0.113/lionagi/configs/__init__.py +7 -0
- lionagi-0.0.113/lionagi/configs/oai_configs.py +49 -0
- lionagi-0.0.113/lionagi/configs/openrouter_config.py +49 -0
- lionagi-0.0.113/lionagi/core/__init__.py +15 -0
- lionagi-0.0.113/lionagi/core/instruction_sets.py +1 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi/core/messages.py +2 -2
- lionagi-0.0.113/lionagi/core/sessions.py +262 -0
- lionagi-0.0.113/lionagi/loader/__init__.py +12 -0
- lionagi-0.0.113/lionagi/loader/chunker.py +157 -0
- lionagi-0.0.113/lionagi/loader/reader.py +124 -0
- lionagi-0.0.113/lionagi/objs/__init__.py +7 -0
- lionagi-0.0.113/lionagi/objs/messenger.py +163 -0
- lionagi-0.0.113/lionagi/objs/tool_registry.py +247 -0
- lionagi-0.0.113/lionagi/schema/__init__.py +11 -0
- lionagi-0.0.113/lionagi/schema/base_schema.py +239 -0
- lionagi-0.0.113/lionagi/schema/base_tool.py +9 -0
- lionagi-0.0.113/lionagi/schema/data_logger.py +94 -0
- lionagi-0.0.113/lionagi/services/__init__.py +14 -0
- lionagi-0.0.112/lionagi/service_/oai.py → lionagi-0.0.113/lionagi/services/base_api_service.py +49 -82
- lionagi-0.0.112/lionagi/endpoint/base_endpoint.py → lionagi-0.0.113/lionagi/services/chatcompletion.py +19 -22
- lionagi-0.0.113/lionagi/services/oai.py +34 -0
- lionagi-0.0.113/lionagi/services/openrouter.py +32 -0
- lionagi-0.0.112/lionagi/service_/service_utils.py → lionagi-0.0.113/lionagi/services/service_objs.py +0 -1
- lionagi-0.0.113/lionagi/structure/__init__.py +7 -0
- lionagi-0.0.113/lionagi/structure/relationship.py +128 -0
- lionagi-0.0.113/lionagi/structure/structure.py +160 -0
- lionagi-0.0.113/lionagi/tests/test_flatten_util.py +426 -0
- lionagi-0.0.113/lionagi/tools/scorer.py +1 -0
- lionagi-0.0.113/lionagi/tools/summarizer.py +1 -0
- lionagi-0.0.113/lionagi/tools/validator.py +1 -0
- lionagi-0.0.113/lionagi/utils/__init__.py +49 -0
- lionagi-0.0.113/lionagi/utils/api_util.py +86 -0
- lionagi-0.0.113/lionagi/utils/call_util.py +347 -0
- lionagi-0.0.113/lionagi/utils/flat_util.py +540 -0
- lionagi-0.0.113/lionagi/utils/io_util.py +102 -0
- lionagi-0.0.113/lionagi/utils/load_utils.py +190 -0
- lionagi-0.0.113/lionagi/utils/sys_util.py +191 -0
- lionagi-0.0.113/lionagi/utils/tool_util.py +92 -0
- lionagi-0.0.113/lionagi/utils/type_util.py +81 -0
- lionagi-0.0.113/lionagi/version.py +1 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi.egg-info/PKG-INFO +37 -13
- lionagi-0.0.113/lionagi.egg-info/SOURCES.txt +89 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/setup.py +0 -2
- lionagi-0.0.112/lionagi/core/__init__.py +0 -9
- lionagi-0.0.112/lionagi/core/instruction_sets.py +0 -3
- lionagi-0.0.112/lionagi/core/sessions.py +0 -115
- lionagi-0.0.112/lionagi/endpoint/chat_completion.py +0 -20
- lionagi-0.0.112/lionagi/endpoint/endpoint_utils.py +0 -0
- lionagi-0.0.112/lionagi/llm_configs.py +0 -21
- lionagi-0.0.112/lionagi/loader/__init__.py +0 -7
- lionagi-0.0.112/lionagi/loader/chunker.py +0 -0
- lionagi-0.0.112/lionagi/loader/load_utils.py +0 -161
- lionagi-0.0.112/lionagi/loader/reader.py +0 -0
- lionagi-0.0.112/lionagi/schema.py +0 -275
- lionagi-0.0.112/lionagi/service_/__init__.py +0 -6
- lionagi-0.0.112/lionagi/service_/base_service.py +0 -48
- lionagi-0.0.112/lionagi/services.py +0 -1
- lionagi-0.0.112/lionagi/tools/__init__.py +0 -5
- lionagi-0.0.112/lionagi/tools/coder.py +0 -0
- lionagi-0.0.112/lionagi/tools/scorer.py +0 -0
- lionagi-0.0.112/lionagi/tools/tool_utils.py +0 -75
- lionagi-0.0.112/lionagi/tools/validator.py +0 -0
- lionagi-0.0.112/lionagi/utils/__init__.py +0 -23
- lionagi-0.0.112/lionagi/utils/sys_utils.py +0 -799
- lionagi-0.0.112/lionagi/version.py +0 -1
- lionagi-0.0.112/lionagi.egg-info/SOURCES.txt +0 -72
- {lionagi-0.0.112 → lionagi-0.0.113}/LICENSE +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/README.rst +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi/core/conversations.py +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi/core/flows.py +0 -0
- /lionagi-0.0.112/lionagi/core/responses.py → /lionagi-0.0.113/lionagi/datastore/__init__.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/assistants.py → /lionagi-0.0.113/lionagi/datastore/chroma.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/audio.py → /lionagi-0.0.113/lionagi/datastore/deeplake.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/embeddings.py → /lionagi-0.0.113/lionagi/datastore/elasticsearch.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/files.py → /lionagi-0.0.113/lionagi/datastore/lantern.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/fine_tuning.py → /lionagi-0.0.113/lionagi/datastore/pinecone.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/images.py → /lionagi-0.0.113/lionagi/datastore/postgres.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/messages.py → /lionagi-0.0.113/lionagi/datastore/qdrant.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/models.py → /lionagi-0.0.113/lionagi/schema/base_condition.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/anthropic.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/anyscale.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/azure.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/bedrock.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/everlyai.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/gemini.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/gpt4all.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/huggingface.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/litellm.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/localai.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/mistralai.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/ollama.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/openllm.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/perplexity.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/predibase.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/rungpt.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/vllm.py +0 -0
- {lionagi-0.0.112/lionagi/service_ → lionagi-0.0.113/lionagi/services}/xinference.py +0 -0
- {lionagi-0.0.112/lionagi/datastore → lionagi-0.0.113/lionagi/tests}/__init__.py +0 -0
- {lionagi-0.0.112/lionagi/endpoint → lionagi-0.0.113/lionagi/tools}/__init__.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/moderations.py → /lionagi-0.0.113/lionagi/tools/coder.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/runs.py → /lionagi-0.0.113/lionagi/tools/planner.py +0 -0
- /lionagi-0.0.112/lionagi/endpoint/threads.py → /lionagi-0.0.113/lionagi/tools/prompter.py +0 -0
- /lionagi-0.0.112/lionagi/service_/openrouter.py → /lionagi-0.0.113/lionagi/tools/sandbox.py +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi.egg-info/dependency_links.txt +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi.egg-info/requires.txt +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/lionagi.egg-info/top_level.txt +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/pyproject.toml +0 -0
- {lionagi-0.0.112 → lionagi-0.0.113}/setup.cfg +0 -0
{lionagi-0.0.112 → lionagi-0.0.113}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lionagi
-Version: 0.0.112
+Version: 0.0.113
 Summary: Towards automated general intelligence.
 Author: HaiyangLi
 Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -220,7 +220,11 @@ Requires-Dist: python-dotenv==1.0.0
 Requires-Dist: tiktoken==0.5.1
 Requires-Dist: httpx==0.25.1
 
-![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935)
+![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
+
+
+
+
 
 [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Discord](https://discord.gg/7RGWqpSxze)
 
@@ -239,18 +243,20 @@ Install LionAGI with pip:
 ```bash
 pip install lionagi
 ```
-Download the `.env_template` file, input your
+Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
+by default we use `OPENAI_API_KEY`.
 
-### Features
 
-- Robust performance
-- Efficient data operations for reading, chunking, binning, writing, storing and managing data.
-- Fast interaction with LLM services like OpenAI with **configurable rate limiting concurrent API calls** for maximum throughput.
-- Create a production ready LLM application **in hours**. Intuitive workflow management to streamline the process from idea to market.
-- (Work In Progress): verstile intergration with most API and local LLM services.
 
+### Features
+- Create a production ready LLM application **in hours**, with more than 100 models to choose from
+- written in pure python, minimum dependency `aiohttp`, `python-dotenv`, `tiktoken`, `pydantic`
+- Efficient and verstile data operations for reading, chunking, binning, writing, storing data with built-in support for `langchain` and `llamaindex`
+- Unified interface with any LLM provider, API or local
+- Fast and **concurrent** API call with **configurable rate limit**
+- (Work In Progress) support for hundreds of models both API and local
 ---
-LionAGI is designed to be
+LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)
 
 
 **Notice**:
@@ -271,11 +277,11 @@ import lionagi as li
 system = "You are a helpful assistant designed to perform calculations."
 instruction = {"Addition":"Add the two numbers together i.e. x+y"}
 context = {"x": 10, "y": 5}
+```
 
-
+```python
+# in interactive environment (.ipynb for example)
 calculator = li.Session(system=system)
-
-# run a LLM API call
 result = await calculator.initiate(instruction=instruction,
                                    context=context,
                                    model="gpt-4-1106-preview")
@@ -283,6 +289,24 @@ result = await calculator.initiate(instruction=instruction,
 print(f"Calculation Result: {result}")
 ```
 
+```python
+# or otherwise, you can use
+import asyncio
+from dotenv import loadenv
+
+load_dotenv()
+
+async def main():
+    calculator = li.Session(system=system)
+    result = await calculator.initiate(instruction=instruction,
+                                       context=context,
+                                       model="gpt-4-1106-preview")
+    print(f"Calculation Result: {result}")
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
 Visit our notebooks for our examples.
 
 ### Community
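Note on the second quickstart example added above: `python-dotenv` exports `load_dotenv`, not `loadenv`, so the script as shipped would fail at the import before ever reaching `load_dotenv()`. A corrected, self-contained version of the same script (all other names taken verbatim from the diff):

```python
# Corrected quickstart script: the only change from the shipped example is
# importing `load_dotenv` (python-dotenv's actual export) instead of `loadenv`.
import asyncio
from dotenv import load_dotenv

import lionagi as li

load_dotenv()  # picks up OPENAI_API_KEY from the .env file in the project root

system = "You are a helpful assistant designed to perform calculations."
instruction = {"Addition": "Add the two numbers together i.e. x+y"}
context = {"x": 10, "y": 5}

async def main():
    calculator = li.Session(system=system)
    result = await calculator.initiate(instruction=instruction,
                                       context=context,
                                       model="gpt-4-1106-preview")
    print(f"Calculation Result: {result}")

if __name__ == "__main__":
    asyncio.run(main())
```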
{lionagi-0.0.112 → lionagi-0.0.113}/README.md

@@ -1,4 +1,8 @@
-![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935)
+![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
+
+
+
+
 
 [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Discord](https://discord.gg/7RGWqpSxze)
 
@@ -17,18 +21,20 @@ Install LionAGI with pip:
 ```bash
 pip install lionagi
 ```
-Download the `.env_template` file, input your
+Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
+by default we use `OPENAI_API_KEY`.
 
-### Features
 
-- Robust performance
-- Efficient data operations for reading, chunking, binning, writing, storing and managing data.
-- Fast interaction with LLM services like OpenAI with **configurable rate limiting concurrent API calls** for maximum throughput.
-- Create a production ready LLM application **in hours**. Intuitive workflow management to streamline the process from idea to market.
-- (Work In Progress): verstile intergration with most API and local LLM services.
 
+### Features
+- Create a production ready LLM application **in hours**, with more than 100 models to choose from
+- written in pure python, minimum dependency `aiohttp`, `python-dotenv`, `tiktoken`, `pydantic`
+- Efficient and verstile data operations for reading, chunking, binning, writing, storing data with built-in support for `langchain` and `llamaindex`
+- Unified interface with any LLM provider, API or local
+- Fast and **concurrent** API call with **configurable rate limit**
+- (Work In Progress) support for hundreds of models both API and local
 ---
-LionAGI is designed to be
+LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)
 
 
 **Notice**:
@@ -49,11 +55,11 @@ import lionagi as li
 system = "You are a helpful assistant designed to perform calculations."
 instruction = {"Addition":"Add the two numbers together i.e. x+y"}
 context = {"x": 10, "y": 5}
+```
 
-
+```python
+# in interactive environment (.ipynb for example)
 calculator = li.Session(system=system)
-
-# run a LLM API call
 result = await calculator.initiate(instruction=instruction,
                                    context=context,
                                    model="gpt-4-1106-preview")
@@ -61,6 +67,24 @@ result = await calculator.initiate(instruction=instruction,
 print(f"Calculation Result: {result}")
 ```
 
+```python
+# or otherwise, you can use
+import asyncio
+from dotenv import loadenv
+
+load_dotenv()
+
+async def main():
+    calculator = li.Session(system=system)
+    result = await calculator.initiate(instruction=instruction,
+                                       context=context,
+                                       model="gpt-4-1106-preview")
+    print(f"Calculation Result: {result}")
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
 Visit our notebooks for our examples.
 
 ### Community
{lionagi-0.0.112 → lionagi-0.0.113}/lionagi/__init__.py

@@ -18,10 +18,10 @@ import logging
 from .version import __version__
 
 from .utils import *
-from .
-from .
-from .service_ import *
+from .schema import *
+from .structure import *
 from .core import *
+from .objs import *
 # from .datastore import *
 # from .structure import *
 
lionagi-0.0.113/lionagi/bridge/langchain.py

@@ -0,0 +1,131 @@
+from typing import Union, Callable, List, Dict, Any
+from ..schema.base_schema import T, DataNode
+from ..utils.sys_util import change_dict_key
+
+
+def from_langchain(lc_doc: Any) -> T:
+    """
+    Converts a langchain document into a DataNode object.
+
+    Parameters:
+        lc_doc (Any): The langchain document to be converted.
+
+    Returns:
+        DataNode: A DataNode object created from the langchain document.
+    """
+    info_json = lc_doc.to_json()
+    info_node = {'lc_id': info_json['id']}
+    info_node = {**info_node, **info_json['kwargs']}
+    return DataNode(**info_node)
+
+def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
+    """
+    Converts a DataNode into a langchain Document.
+
+    Parameters:
+        datanode (DataNode): The DataNode to be converted.
+
+        **kwargs: Additional keyword arguments to be included in the Document.
+
+    Returns:
+        Any: A langchain Document created from the DataNode.
+    """
+    from langchain.schema import Document
+
+    dnode = datanode.to_dict()
+    change_dict_key(dnode, old_key='content', new_key='page_content')
+    change_dict_key(dnode, old_key='lc_id', new_key='id_')
+    dnode = {**dnode, **kwargs}
+    return Document(**dnode)
+
+def langchain_loader(loader: Union[str, Callable],
+                     loader_args: List[Any] = [],
+                     loader_kwargs: Dict[str, Any] = {}) -> Any:
+    """
+    Loads data using a specified langchain loader.
+
+    Parameters:
+        loader (Union[str, Callable]): The name of the loader function or the loader function itself.
+
+        loader_args (List[Any]): Positional arguments to pass to the loader function.
+
+        loader_kwargs (Dict[str, Any]): Keyword arguments to pass to the loader function.
+
+    Returns:
+        Any: The data loaded by the loader function.
+
+    Raises:
+        ValueError: If the specified loader is invalid or if the loader fails to load data.
+    """
+    import langchain.document_loaders as document_loaders
+
+    try:
+        if isinstance(loader, str):
+            loader = getattr(document_loaders, loader)
+        else:
+            loader = loader
+    except Exception as e:
+        raise ValueError(f'Invalid loader: {loader}. Error: {e}')
+
+    try:
+        loader_obj = loader(*loader_args, **loader_kwargs)
+        data = loader_obj.load()
+        return data
+    except Exception as e:
+        raise ValueError(f'Failed to load. Error: {e}')
+
+def langchain_text_splitter(data: Union[str, List],
+                            splitter: Union[str, Callable],
+                            splitter_args: List[Any] = [],
+                            splitter_kwargs: Dict[str, Any] = {}) -> List[str]:
+
+    import langchain.text_splitter as text_splitter
+
+    try:
+        if isinstance(splitter, str):
+            splitter = getattr(text_splitter, splitter)
+        else:
+            splitter = splitter
+    except Exception as e:
+        raise ValueError(f'Invalid text splitter: {splitter}. Error: {e}')
+
+    try:
+        splitter_obj = splitter(*splitter_args, **splitter_kwargs)
+        if isinstance(data, str):
+            chunk = splitter_obj.split_text(data)
+        else:
+            chunk = splitter_obj.split_documents(data)
+        return chunk
+    except Exception as e:
+        raise ValueError(f'Failed to split. Error: {e}')
+
+# def langchain_code_splitter(doc: str,
+#                             language: str,
+#                             splitter_args: List[Any] = [],
+#                             splitter_kwargs: Dict[str, Any] = {}) -> List[Any]:
+#     """
+#     Splits code into smaller chunks using a RecursiveCharacterTextSplitter specific to a language.
+#
+#     Parameters:
+#         doc (str): The code document to be split.
+#         language (str): The programming language of the code.
+#         splitter_args (List[Any]): Positional arguments to pass to the splitter.
+#         splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter.
+#
+#     Returns:
+#         List[Any]: A list of Documents, each representing a chunk of the original code.
+#
+#     Raises:
+#         ValueError: If the splitter fails to split the code document.
+#     """
+#     from langchain.text_splitter import RecursiveCharacterTextSplitter
+#
+#     try:
+#         splitter = RecursiveCharacterTextSplitter.from_language(
+#             language=language, *splitter_args, **splitter_kwargs
+#         )
+#         docs = splitter.create_documents([doc])
+#         return docs
+#     except Exception as e:
+#         raise ValueError(f'Failed to split. Error: {e}')
+#
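The new langchain bridge resolves string names against `langchain.document_loaders` and `langchain.text_splitter` with `getattr`, so callers can pass either a class name or the class itself. A minimal usage sketch (assuming `langchain` is installed; the loader, splitter, and `data.txt` path are illustrative, not from the release):

```python
# Hypothetical usage of the new bridge; the import path mirrors the new file layout.
from lionagi.bridge.langchain import (
    langchain_loader, langchain_text_splitter, from_langchain)

# Resolve "TextLoader" against langchain.document_loaders, then call .load()
docs = langchain_loader('TextLoader', loader_args=['data.txt'])

# Resolve "RecursiveCharacterTextSplitter" and split the loaded documents
chunks = langchain_text_splitter(
    docs, 'RecursiveCharacterTextSplitter',
    splitter_kwargs={'chunk_size': 500, 'chunk_overlap': 50})

# Convert one langchain Document into a lionagi DataNode
node = from_langchain(docs[0])
```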
lionagi-0.0.113/lionagi/bridge/llama_index.py

@@ -0,0 +1,157 @@
+from typing import Union, Callable, List, Any, Dict
+from ..schema.base_schema import DataNode, T
+from ..utils.sys_util import change_dict_key
+
+
+def from_llama_index(llama_node: Any, **kwargs: Any) -> T:
+    """
+    Converts a Llama Index node into a DataNode object.
+
+    Parameters:
+        llama_node (Any): The Llama Index node to be converted.
+
+        **kwargs: Additional keyword arguments for JSON serialization.
+
+    Returns:
+        DataNode: A DataNode object created from the Llama Index node.
+    """
+    llama_dict = llama_node.to_dict(**kwargs)
+    return DataNode.from_dict(llama_dict)
+
+def to_llama_index_textnode(datanode: T, **kwargs: Any) -> Any:
+    """
+    Converts a DataNode into a Llama Index TextNode.
+
+    Parameters:
+        datanode (DataNode): The DataNode to be converted.
+
+        **kwargs: Additional keyword arguments to be included in the TextNode.
+
+    Returns:
+        TextNode: A Llama Index TextNode created from the DataNode.
+    """
+    # to llama_index textnode
+    from llama_index.schema import TextNode
+
+    dnode = datanode.to_dict()
+    change_dict_key(dnode, old_key='content', new_key='text')
+    change_dict_key(dnode, old_key='node_id', new_key='id_')
+
+    dnode = {**dnode, **kwargs}
+    return TextNode.from_dict(dnode)
+
+def get_llama_reader(reader: Union[str, Callable]) -> Callable:
+    """
+    Gets a Llama Index reader function.
+
+    Parameters:
+        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+    Returns:
+        Callable: The Llama Index reader function.
+
+    Raises:
+        ValueError: If the specified reader is invalid.
+    """
+    try:
+        if isinstance(reader, str):
+            if reader == 'SimpleDirectoryReader':
+                from llama_index import SimpleDirectoryReader
+                return SimpleDirectoryReader
+            else:
+                from llama_index import download_loader
+                return download_loader(reader)
+        else:
+            return reader
+    except Exception as e:
+        raise ValueError(f'Invalid reader: {reader}, Error: {e}')
+
+def llama_index_reader(reader: Union[str, Callable],
+                       reader_args: List[Any] = [],
+                       reader_kwargs: Dict[str, Any] = {},
+                       load_data_args: List[Any] = [],
+                       load_data_kwargs: Dict[str, Any] = {}) -> List[Any]:
+    """
+    Loads documents using a specified Llama Index reader.
+
+    Parameters:
+        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+        reader_args (List[Any]): Positional arguments to pass to the reader function.
+
+        reader_kwargs (Dict[str, Any]): Keyword arguments to pass to the reader function.
+
+        load_data_args (List[Any]): Positional arguments for the load_data method.
+
+        load_data_kwargs (Dict[str, Any]): Keyword arguments for the load_data method.
+
+    Returns:
+        List[Any]: A list of documents loaded by the reader.
+
+    Raises:
+        ValueError: If the specified reader is invalid or if the reader fails to load documents.
+    """
+    reader = get_llama_reader(reader)
+
+    try:
+        loader = reader(*reader_args, **reader_kwargs)
+        documents = loader.load_data(*load_data_args, **load_data_kwargs)
+        return documents
+
+    except Exception as e:
+        raise ValueError(f'Failed to read. Error: {e}')
+
+def get_llama_parser(parser: Union[str, Callable]) -> Callable:
+    import llama_index.node_parser as node_parser
+    import llama_index.text_splitter as text_splitter
+
+    try:
+        return getattr(node_parser, parser)
+    except Exception as e1:
+        try:
+            if isinstance(parser, str):
+                return getattr(text_splitter, parser)
+            else:
+                return parser
+        except Exception as e2:
+            raise ValueError(f'Invalid node parser: {parser}. Error: {e1}, {e2}')
+
+
+def llama_index_node_parser(documents: List[Any],
+                            parser: Union[str, Callable],
+                            parser_args: List[Any] = [],
+                            parser_kwargs: Dict[str, Any] = {},
+                            parsing_kwargs: Dict[str, Any] = {}) -> List[Any]:
+    """
+    Parses documents into nodes using a specified Llama Index node parser.
+
+    Parameters:
+        documents (List[Any]): The documents to parse.
+
+        parser (Union[str, Callable]): The name of the parser function or the parser function itself.
+
+        parser_args (List[Any]): Positional arguments to pass to the parser function.
+
+        parser_kwargs (Dict[str, Any]): Keyword arguments to pass to the parser function.
+
+    Returns:
+        List[Any]: A list of nodes parsed from the documents.
+
+    Raises:
+        ValueError: If the specified parser is invalid or if the parser fails to parse the documents.
+    """
+    parser = get_llama_parser(parser)
+
+    try:
+        parser_obj = parser(*parser_args, **parser_kwargs)
+        nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+        return nodes
+
+    except Exception as e1:
+        try:
+            parser_obj = parser.from_defaults(*parser_args, **parser_kwargs)
+            nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+            return nodes
+        except Exception as e2:
+            raise ValueError(f'Failed to parse. Error: {e1}, {e2}')
+
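The llama_index bridge follows the same resolve-by-name pattern, with a `from_defaults` fallback when a parser class cannot be instantiated directly. A minimal usage sketch (assuming `llama_index` is installed; the reader, parser name, and `docs/` path are illustrative):

```python
# Hypothetical usage of the new llama_index bridge.
from lionagi.bridge.llama_index import (
    llama_index_reader, llama_index_node_parser, from_llama_index)

# SimpleDirectoryReader is special-cased in get_llama_reader;
# other names go through llama_index.download_loader
documents = llama_index_reader('SimpleDirectoryReader', reader_args=['docs/'])

# Resolved against llama_index.node_parser, falling back to from_defaults()
nodes = llama_index_node_parser(documents, 'SimpleNodeParser')

# Convert one llama_index node into a lionagi DataNode
data_node = from_llama_index(nodes[0])
```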
lionagi-0.0.113/lionagi/configs/oai_configs.py

@@ -0,0 +1,49 @@
+oai_chat_llmconfig = {
+    "model": "gpt-4-1106-preview",
+    "frequency_penalty": 0,
+    "max_tokens": None,
+    "n": 1,
+    "presence_penalty": 0,
+    "response_format": {"type": "text"},
+    "seed": None,
+    "stop": None,
+    "stream": False,
+    "temperature": 0.7,
+    "top_p": 1,
+    "tools": None,
+    "tool_choice": "none",
+    "user": None
+    }
+
+oai_chat_schema = {
+    "required" : ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+    "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+    "input": "messages",
+    "config": oai_chat_llmconfig
+    }
+
+oai_finetune_llmconfig = {
+    "model": "gpt-3.5-turbo",
+    "hyperparameters": {
+        "batch_size": "auto",
+        "learning_rate_multiplier": "auto",
+        "n_epochs": "auto"
+    },
+    "suffix": None,
+    "training_file": None,
+}
+
+oai_finetune_schema = {
+    "required" : ["model", "training_file"],
+    "optional": ["hyperparameters", "suffix", "validate_file"],
+    "input": ["training_file"],
+    "config": oai_finetune_llmconfig
+}
+
+
+oai_schema = {
+
+    "chat": oai_chat_schema,
+    "finetune": oai_finetune_schema
+
+}
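Each schema dict pairs a default config with the lists of required and optional parameter names plus the name of the input field. A rough sketch of how a chat payload could be assembled from it (illustrative only; the actual assembly lives in the new `services/` modules):

```python
# Sketch only: shows the intended shape of oai_chat_schema, not lionagi's
# actual payload-building code.
from lionagi.configs.oai_configs import oai_chat_schema

def build_chat_payload(messages, **overrides):
    config = {**oai_chat_schema["config"], **overrides}
    payload = {oai_chat_schema["input"]: messages}      # the "messages" field
    for key in oai_chat_schema["required"]:
        payload[key] = config[key]
    for key in oai_chat_schema["optional"]:
        # drop unset optional params, as API payloads typically do
        if config.get(key) not in (None, "", "none", False):
            payload[key] = config[key]
    return payload

payload = build_chat_payload([{"role": "user", "content": "hi"}],
                             temperature=0.2)
```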
lionagi-0.0.113/lionagi/configs/openrouter_config.py

@@ -0,0 +1,49 @@
+openrouter_chat_llmconfig = {
+    "model": "gpt-4-1106-preview",
+    "frequency_penalty": 0,
+    "max_tokens": None,
+    "n": 1,
+    "presence_penalty": 0,
+    "response_format": {"type": "text"},
+    "seed": None,
+    "stop": None,
+    "stream": False,
+    "temperature": 0.7,
+    "top_p": 1,
+    "tools": None,
+    "tool_choice": "none",
+    "user": None
+    }
+
+openrouter_chat_schema = {
+    "required" : ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+    "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+    "input": "messages",
+    "config": openrouter_chat_llmconfig
+    }
+
+openrouter_finetune_llmconfig = {
+    "model": "gpt-3.5-turbo",
+    "hyperparameters": {
+        "batch_size": "auto",
+        "learning_rate_multiplier": "auto",
+        "n_epochs": "auto"
+    },
+    "suffix": None,
+    "training_file": None,
+}
+
+openrouter_finetune_schema = {
+    "required" : ["model", "training_file"],
+    "optional": ["hyperparameters", "suffix", "validate_file"],
+    "input": ["training_file"],
+    "config": openrouter_finetune_llmconfig
+}
+
+
+openrouter_schema = {
+
+    "chat": openrouter_chat_schema,
+    "finetune": openrouter_finetune_schema
+
+}
lionagi-0.0.113/lionagi/core/__init__.py

@@ -0,0 +1,15 @@
+# from .messages import Response, Instruction, System
+from .conversations import Conversation
+from .sessions import Session
+
+# from .instruction_sets import InstructionSet
+# from .flows.flow import Flow
+
+
+__all__ = [
+    # "Response",
+    # "Instruction",
+    # "System",
+    "Conversation",
+    "Session", #"Flow", "InstructionSet"
+]
lionagi-0.0.113/lionagi/core/instruction_sets.py

@@ -0,0 +1 @@
+# dynamically structured preconfigured instructions
{lionagi-0.0.112 → lionagi-0.0.113}/lionagi/core/messages.py

@@ -1,6 +1,6 @@
 from datetime import datetime
 import json
-from ..utils
+from ..utils import create_id, lcall
 from ..schema import DataLogger
 
 
@@ -61,7 +61,7 @@ class Message:
 
             name (str): The name associated with the message. Default is None.
         """
-        if sum(
+        if sum(lcall([system, instruction, response], bool)) > 1:
             raise ValueError("Error: Message cannot have more than one role.")
 
         else:
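`lcall`, newly imported from `..utils` above, appears to map a function over a list, so the changed guard counts how many of the three role arguments are truthy. Equivalent plain Python, for illustration only:

```python
# Illustration of the new guard in Message: a message may carry at most one
# of the three roles, so more than one truthy argument is an error.
if sum(bool(x) for x in (system, instruction, response)) > 1:
    raise ValueError("Error: Message cannot have more than one role.")
```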