lionagi 0.0.111__tar.gz → 0.0.113__tar.gz

Files changed (103)
  1. {lionagi-0.0.111 → lionagi-0.0.113}/PKG-INFO +44 -15
  2. lionagi-0.0.113/README.md +113 -0
  3. {lionagi-0.0.111 → lionagi-0.0.113}/lionagi/__init__.py +7 -2
  4. lionagi-0.0.113/lionagi/bridge/__init__.py +7 -0
  5. lionagi-0.0.113/lionagi/bridge/langchain.py +131 -0
  6. lionagi-0.0.113/lionagi/bridge/llama_index.py +157 -0
  7. lionagi-0.0.113/lionagi/configs/__init__.py +7 -0
  8. lionagi-0.0.113/lionagi/configs/oai_configs.py +49 -0
  9. lionagi-0.0.113/lionagi/configs/openrouter_config.py +49 -0
  10. lionagi-0.0.113/lionagi/core/__init__.py +15 -0
  11. lionagi-0.0.111/lionagi/session/conversation.py → lionagi-0.0.113/lionagi/core/conversations.py +10 -17
  12. lionagi-0.0.113/lionagi/core/flows.py +1 -0
  13. lionagi-0.0.113/lionagi/core/instruction_sets.py +1 -0
  14. lionagi-0.0.111/lionagi/session/message.py → lionagi-0.0.113/lionagi/core/messages.py +5 -5
  15. lionagi-0.0.113/lionagi/core/sessions.py +262 -0
  16. lionagi-0.0.113/lionagi/datastore/__init__.py +1 -0
  17. lionagi-0.0.113/lionagi/datastore/chroma.py +1 -0
  18. lionagi-0.0.113/lionagi/datastore/deeplake.py +1 -0
  19. lionagi-0.0.113/lionagi/datastore/elasticsearch.py +1 -0
  20. lionagi-0.0.113/lionagi/datastore/lantern.py +1 -0
  21. lionagi-0.0.113/lionagi/datastore/pinecone.py +1 -0
  22. lionagi-0.0.113/lionagi/datastore/postgres.py +1 -0
  23. lionagi-0.0.113/lionagi/datastore/qdrant.py +1 -0
  24. lionagi-0.0.113/lionagi/loader/__init__.py +12 -0
  25. lionagi-0.0.113/lionagi/loader/chunker.py +157 -0
  26. lionagi-0.0.113/lionagi/loader/reader.py +124 -0
  27. lionagi-0.0.113/lionagi/objs/__init__.py +7 -0
  28. lionagi-0.0.113/lionagi/objs/messenger.py +163 -0
  29. lionagi-0.0.113/lionagi/objs/tool_registry.py +247 -0
  30. lionagi-0.0.113/lionagi/schema/__init__.py +11 -0
  31. lionagi-0.0.113/lionagi/schema/base_condition.py +1 -0
  32. lionagi-0.0.113/lionagi/schema/base_schema.py +239 -0
  33. lionagi-0.0.113/lionagi/schema/base_tool.py +9 -0
  34. lionagi-0.0.113/lionagi/schema/data_logger.py +94 -0
  35. lionagi-0.0.113/lionagi/services/__init__.py +14 -0
  36. lionagi-0.0.113/lionagi/services/anthropic.py +1 -0
  37. lionagi-0.0.113/lionagi/services/anyscale.py +0 -0
  38. lionagi-0.0.113/lionagi/services/azure.py +1 -0
  39. lionagi-0.0.111/lionagi/api/oai_service.py → lionagi-0.0.113/lionagi/services/base_api_service.py +74 -148
  40. lionagi-0.0.113/lionagi/services/bedrock.py +0 -0
  41. lionagi-0.0.113/lionagi/services/chatcompletion.py +48 -0
  42. lionagi-0.0.113/lionagi/services/everlyai.py +0 -0
  43. lionagi-0.0.113/lionagi/services/gemini.py +0 -0
  44. lionagi-0.0.113/lionagi/services/gpt4all.py +0 -0
  45. lionagi-0.0.113/lionagi/services/huggingface.py +0 -0
  46. lionagi-0.0.113/lionagi/services/litellm.py +1 -0
  47. lionagi-0.0.113/lionagi/services/localai.py +0 -0
  48. lionagi-0.0.113/lionagi/services/mistralai.py +0 -0
  49. lionagi-0.0.113/lionagi/services/oai.py +34 -0
  50. lionagi-0.0.113/lionagi/services/ollama.py +1 -0
  51. lionagi-0.0.113/lionagi/services/openllm.py +0 -0
  52. lionagi-0.0.113/lionagi/services/openrouter.py +32 -0
  53. lionagi-0.0.113/lionagi/services/perplexity.py +0 -0
  54. lionagi-0.0.113/lionagi/services/predibase.py +0 -0
  55. lionagi-0.0.113/lionagi/services/rungpt.py +0 -0
  56. lionagi-0.0.111/lionagi/utils/api_util.py → lionagi-0.0.113/lionagi/services/service_objs.py +14 -171
  57. lionagi-0.0.113/lionagi/services/vllm.py +0 -0
  58. lionagi-0.0.113/lionagi/services/xinference.py +0 -0
  59. lionagi-0.0.113/lionagi/structure/__init__.py +7 -0
  60. lionagi-0.0.113/lionagi/structure/relationship.py +128 -0
  61. lionagi-0.0.113/lionagi/structure/structure.py +160 -0
  62. lionagi-0.0.113/lionagi/tests/__init__.py +0 -0
  63. lionagi-0.0.113/lionagi/tests/test_flatten_util.py +426 -0
  64. lionagi-0.0.113/lionagi/tools/__init__.py +0 -0
  65. lionagi-0.0.113/lionagi/tools/coder.py +1 -0
  66. lionagi-0.0.113/lionagi/tools/planner.py +1 -0
  67. lionagi-0.0.113/lionagi/tools/prompter.py +1 -0
  68. lionagi-0.0.113/lionagi/tools/sandbox.py +1 -0
  69. lionagi-0.0.113/lionagi/tools/scorer.py +1 -0
  70. lionagi-0.0.113/lionagi/tools/summarizer.py +1 -0
  71. lionagi-0.0.113/lionagi/tools/validator.py +1 -0
  72. lionagi-0.0.113/lionagi/utils/__init__.py +49 -0
  73. lionagi-0.0.113/lionagi/utils/api_util.py +86 -0
  74. lionagi-0.0.113/lionagi/utils/call_util.py +347 -0
  75. lionagi-0.0.113/lionagi/utils/flat_util.py +540 -0
  76. lionagi-0.0.113/lionagi/utils/io_util.py +102 -0
  77. lionagi-0.0.113/lionagi/utils/load_utils.py +190 -0
  78. lionagi-0.0.113/lionagi/utils/sys_util.py +191 -0
  79. lionagi-0.0.113/lionagi/utils/tool_util.py +92 -0
  80. lionagi-0.0.113/lionagi/utils/type_util.py +81 -0
  81. lionagi-0.0.113/lionagi/version.py +1 -0
  82. {lionagi-0.0.111 → lionagi-0.0.113}/lionagi.egg-info/PKG-INFO +44 -15
  83. lionagi-0.0.113/lionagi.egg-info/SOURCES.txt +89 -0
  84. {lionagi-0.0.111 → lionagi-0.0.113}/setup.py +1 -2
  85. lionagi-0.0.111/README.md +0 -84
  86. lionagi-0.0.111/lionagi/api/__init__.py +0 -8
  87. lionagi-0.0.111/lionagi/api/oai_config.py +0 -16
  88. lionagi-0.0.111/lionagi/session/__init__.py +0 -7
  89. lionagi-0.0.111/lionagi/session/session.py +0 -380
  90. lionagi-0.0.111/lionagi/utils/__init__.py +0 -11
  91. lionagi-0.0.111/lionagi/utils/doc_util.py +0 -331
  92. lionagi-0.0.111/lionagi/utils/log_util.py +0 -86
  93. lionagi-0.0.111/lionagi/utils/sys_util.py +0 -766
  94. lionagi-0.0.111/lionagi/utils/tool_util.py +0 -209
  95. lionagi-0.0.111/lionagi/version.py +0 -1
  96. lionagi-0.0.111/lionagi.egg-info/SOURCES.txt +0 -25
  97. {lionagi-0.0.111 → lionagi-0.0.113}/LICENSE +0 -0
  98. {lionagi-0.0.111 → lionagi-0.0.113}/README.rst +0 -0
  99. {lionagi-0.0.111 → lionagi-0.0.113}/lionagi.egg-info/dependency_links.txt +0 -0
  100. {lionagi-0.0.111 → lionagi-0.0.113}/lionagi.egg-info/requires.txt +0 -0
  101. {lionagi-0.0.111 → lionagi-0.0.113}/lionagi.egg-info/top_level.txt +0 -0
  102. {lionagi-0.0.111 → lionagi-0.0.113}/pyproject.toml +0 -0
  103. {lionagi-0.0.111 → lionagi-0.0.113}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.0.111
+ Version: 0.0.113
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -220,39 +220,50 @@ Requires-Dist: python-dotenv==1.0.0
  Requires-Dist: tiktoken==0.5.1
  Requires-Dist: httpx==0.25.1
 
- ![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?labelColor=233476aa&color=231fc935) ![GitHub License](https://img.shields.io/github/license/lion-agi/lionagi?labelColor=233476aa&color=231fc935)
+ ![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
 
- [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Website](https://www.lionagi.ai) | [Discord](https://discord.gg/7RGWqpSxze)
+
+
+
+
+ [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Discord](https://discord.gg/7RGWqpSxze)
 
 
  # LionAGI
  **Towards Automated General Intelligence**
 
- LionAGI is a Python intelligent agent framework that combines data manipulation with AI tools, aiming to simplify the integration of advanced machine learning tools, such as Large Language Models (i.e. OpenAI's GPT), with production-level data-centric projects.
+
+ LionAGI is a cutting-edge **intelligent agent framework**. It integrates data manipulation with advanced machine learning tools, such as Large Language Models (e.g. OpenAI's GPT).
+ - Designed for data-centric, production-level projects
+ - Dramatically lowers the barrier to creating intelligent, automated systems that can understand and interact meaningfully with large volumes of data
 
 
  Install LionAGI with pip:
 
  ```bash
  pip install lionagi
  ```
- Download the `.env_template` file, input your OPENAI_API_KEY, save the file, rename as `.env` and put in your project's root directory.
+ Download the `.env_template` file, input the appropriate `API_KEY`, save the file, rename it to `.env`, and put it in your project's root directory.
+ By default we use `OPENAI_API_KEY`.
 
- ### Features
 
- - Robust performance. LionAGI is written in almost pure python. With minimum external dependency (`aiohttp`, `httpx`, `python-dotenv`, `tiktoken`)
- - Efficient data operations for reading, chunking, binning, writing, storing and managing data.
- - Fast interaction with LLM services like OpenAI with **configurable rate limiting concurrent API calls** for maximum throughput.
- - Create a production ready LLM application **in hours**. Intuitive workflow management to streamline and expedite the process from idea to market.
 
+ ### Features
+ - Create a production-ready LLM application **in hours**, with more than 100 models to choose from
+ - Written in pure Python with minimal dependencies: `aiohttp`, `python-dotenv`, `tiktoken`, `pydantic`
+ - Efficient and versatile data operations for reading, chunking, binning, writing, and storing data, with built-in support for `langchain` and `llamaindex`
+ - Unified interface with any LLM provider, API or local
+ - Fast and **concurrent** API calls with a **configurable rate limit**
+ - (Work in progress) support for hundreds of models, both API and local
  ---
- Currently, LionAGI only natively support OpenAI API calls, support for other LLM providers as well as open source models will be integrated in future releases. LionAGI is designed to be async only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)
+ LionAGI is designed to be `asynchronous` only; please check the official Python documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
 
 
  **Notice**:
  * calling the API at maximum throughput over a large set of data with advanced models (e.g. gpt-4) can get **EXPENSIVE IN JUST SECONDS**
  * please know what you are doing, and check your usage on OpenAI regularly
  * default rate limits are set to **tier 1** of the OpenAI model `gpt-4-1106-preview`; please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free); you can modify token rate parameters to fit different use cases
- * Documentation is under process
+ * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases); **main is under development and will change without notice**
 
 
  ### Quick Start
@@ -266,11 +277,11 @@ import lionagi as li
  system = "You are a helpful assistant designed to perform calculations."
  instruction = {"Addition":"Add the two numbers together i.e. x+y"}
  context = {"x": 10, "y": 5}
+ ```
 
- # Initialize a session with a system message
+ ```python
+ # in an interactive environment (.ipynb, for example)
  calculator = li.Session(system=system)
-
- # run a LLM API call
  result = await calculator.initiate(instruction=instruction,
                                     context=context,
                                     model="gpt-4-1106-preview")
@@ -278,6 +289,24 @@ result = await calculator.initiate(instruction=instruction,
  print(f"Calculation Result: {result}")
  ```
 
+ ```python
+ # or, outside an interactive environment, you can use
+ import asyncio
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ async def main():
+     calculator = li.Session(system=system)
+     result = await calculator.initiate(instruction=instruction,
+                                        context=context,
+                                        model="gpt-4-1106-preview")
+     print(f"Calculation Result: {result}")
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
  Visit our notebooks for examples.
 
  ### Community
@@ -0,0 +1,113 @@
+ ![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![Read the Docs](https://img.shields.io/readthedocs/lionagi) ![PyPI - License](https://img.shields.io/pypi/l/lionagi?color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
+
+
+
+
+ [PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://lionagi.readthedocs.io/en/latest/) | [Discord](https://discord.gg/7RGWqpSxze)
+
+
+ # LionAGI
+ **Towards Automated General Intelligence**
+
+
+ LionAGI is a cutting-edge **intelligent agent framework**. It integrates data manipulation with advanced machine learning tools, such as Large Language Models (e.g. OpenAI's GPT).
+ - Designed for data-centric, production-level projects
+ - Dramatically lowers the barrier to creating intelligent, automated systems that can understand and interact meaningfully with large volumes of data
+
+ Install LionAGI with pip:
+
+ ```bash
+ pip install lionagi
+ ```
+ Download the `.env_template` file, input the appropriate `API_KEY`, save the file, rename it to `.env`, and put it in your project's root directory.
+ By default we use `OPENAI_API_KEY`.
+
+
+
+ ### Features
+ - Create a production-ready LLM application **in hours**, with more than 100 models to choose from
+ - Written in pure Python with minimal dependencies: `aiohttp`, `python-dotenv`, `tiktoken`, `pydantic`
+ - Efficient and versatile data operations for reading, chunking, binning, writing, and storing data, with built-in support for `langchain` and `llamaindex`
+ - Unified interface with any LLM provider, API or local
+ - Fast and **concurrent** API calls with a **configurable rate limit**
+ - (Work in progress) support for hundreds of models, both API and local
+ ---
+ LionAGI is designed to be `asynchronous` only; please check the official Python documentation on how `async` works: [here](https://docs.python.org/3/library/asyncio.html)
+
+
+ **Notice**:
+ * calling the API at maximum throughput over a large set of data with advanced models (e.g. gpt-4) can get **EXPENSIVE IN JUST SECONDS**
+ * please know what you are doing, and check your usage on OpenAI regularly
+ * default rate limits are set to **tier 1** of the OpenAI model `gpt-4-1106-preview`; please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free); you can modify token rate parameters to fit different use cases
+ * if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases); **main is under development and will change without notice**
+
+
+ ### Quick Start
+
+ The following example shows how to use LionAGI's `Session` object to interact with the `gpt-4` model:
+
+ ```python
+ import lionagi as li
+
+ # define system messages, context and user instruction
+ system = "You are a helpful assistant designed to perform calculations."
+ instruction = {"Addition":"Add the two numbers together i.e. x+y"}
+ context = {"x": 10, "y": 5}
+ ```
+
+ ```python
+ # in an interactive environment (.ipynb, for example)
+ calculator = li.Session(system=system)
+ result = await calculator.initiate(instruction=instruction,
+                                    context=context,
+                                    model="gpt-4-1106-preview")
+
+ print(f"Calculation Result: {result}")
+ ```
+
+ ```python
+ # or, outside an interactive environment, you can use
+ import asyncio
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ async def main():
+     calculator = li.Session(system=system)
+     result = await calculator.initiate(instruction=instruction,
+                                        context=context,
+                                        model="gpt-4-1106-preview")
+     print(f"Calculation Result: {result}")
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ Visit our notebooks for examples.
+
+ ### Community
+
+ We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members on [Discord](https://discord.gg/7RGWqpSxze).
+
+ ### Citation
+
+ When referencing LionAGI in your projects or research, please cite:
+
+ ```bibtex
+ @software{Li_LionAGI_2023,
+   author = {Haiyang Li},
+   month = {12},
+   year = {2023},
+   title = {LionAGI: Towards Automated General Intelligence},
+   url = {https://github.com/lion-agi/lionagi},
+ }
+ ```
+
+ ## Star History
+ ![Star History Chart](https://api.star-history.com/svg?repos=lion-agi/lionagi&type=Date)
+
+ ### Requirements
+ Python 3.9 or higher.
+
@@ -16,9 +16,14 @@ Copyright 2023 HaiyangLi <ocean@lionagi.ai>
 
  import logging
  from .version import __version__
- from .session import *
+
  from .utils import *
- from .api import *
+ from .schema import *
+ from .structure import *
+ from .core import *
+ from .objs import *
+ # from .datastore import *
+ # from .structure import *
 
 
  logger = logging.getLogger(__name__)
@@ -0,0 +1,7 @@
+ from .langchain import from_langchain
+ from .llama_index import from_llama_index
+
+ __all__ = [
+     "from_langchain",
+     "from_llama_index"
+ ]
@@ -0,0 +1,131 @@
+ from typing import Union, Callable, List, Dict, Any
+ from ..schema.base_schema import T, DataNode
+ from ..utils.sys_util import change_dict_key
+
+
+ def from_langchain(lc_doc: Any) -> T:
+     """
+     Converts a langchain document into a DataNode object.
+
+     Parameters:
+         lc_doc (Any): The langchain document to be converted.
+
+     Returns:
+         DataNode: A DataNode object created from the langchain document.
+     """
+     info_json = lc_doc.to_json()
+     info_node = {'lc_id': info_json['id']}
+     info_node = {**info_node, **info_json['kwargs']}
+     return DataNode(**info_node)
+
+ def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
+     """
+     Converts a DataNode into a langchain Document.
+
+     Parameters:
+         datanode (DataNode): The DataNode to be converted.
+
+         **kwargs: Additional keyword arguments to be included in the Document.
+
+     Returns:
+         Any: A langchain Document created from the DataNode.
+     """
+     from langchain.schema import Document
+
+     dnode = datanode.to_dict()
+     change_dict_key(dnode, old_key='content', new_key='page_content')
+     change_dict_key(dnode, old_key='lc_id', new_key='id_')
+     dnode = {**dnode, **kwargs}
+     return Document(**dnode)
+
+ def langchain_loader(loader: Union[str, Callable],
+                      loader_args: List[Any] = [],
+                      loader_kwargs: Dict[str, Any] = {}) -> Any:
+     """
+     Loads data using a specified langchain loader.
+
+     Parameters:
+         loader (Union[str, Callable]): The name of the loader function or the loader function itself.
+
+         loader_args (List[Any]): Positional arguments to pass to the loader function.
+
+         loader_kwargs (Dict[str, Any]): Keyword arguments to pass to the loader function.
+
+     Returns:
+         Any: The data loaded by the loader function.
+
+     Raises:
+         ValueError: If the specified loader is invalid or if the loader fails to load data.
+     """
+     import langchain.document_loaders as document_loaders
+
+     try:
+         if isinstance(loader, str):
+             loader = getattr(document_loaders, loader)
+         else:
+             loader = loader
+     except Exception as e:
+         raise ValueError(f'Invalid loader: {loader}. Error: {e}')
+
+     try:
+         loader_obj = loader(*loader_args, **loader_kwargs)
+         data = loader_obj.load()
+         return data
+     except Exception as e:
+         raise ValueError(f'Failed to load. Error: {e}')
+
+ def langchain_text_splitter(data: Union[str, List],
+                             splitter: Union[str, Callable],
+                             splitter_args: List[Any] = [],
+                             splitter_kwargs: Dict[str, Any] = {}) -> List[str]:
+
+     import langchain.text_splitter as text_splitter
+
+     try:
+         if isinstance(splitter, str):
+             splitter = getattr(text_splitter, splitter)
+         else:
+             splitter = splitter
+     except Exception as e:
+         raise ValueError(f'Invalid text splitter: {splitter}. Error: {e}')
+
+     try:
+         splitter_obj = splitter(*splitter_args, **splitter_kwargs)
+         if isinstance(data, str):
+             chunk = splitter_obj.split_text(data)
+         else:
+             chunk = splitter_obj.split_documents(data)
+         return chunk
+     except Exception as e:
+         raise ValueError(f'Failed to split. Error: {e}')
+
+ # def langchain_code_splitter(doc: str,
+ #                             language: str,
+ #                             splitter_args: List[Any] = [],
+ #                             splitter_kwargs: Dict[str, Any] = {}) -> List[Any]:
+ #     """
+ #     Splits code into smaller chunks using a RecursiveCharacterTextSplitter specific to a language.
+ #
+ #     Parameters:
+ #         doc (str): The code document to be split.
+ #         language (str): The programming language of the code.
+ #         splitter_args (List[Any]): Positional arguments to pass to the splitter.
+ #         splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter.
+ #
+ #     Returns:
+ #         List[Any]: A list of Documents, each representing a chunk of the original code.
+ #
+ #     Raises:
+ #         ValueError: If the splitter fails to split the code document.
+ #     """
+ #     from langchain.text_splitter import RecursiveCharacterTextSplitter
+ #
+ #     try:
+ #         splitter = RecursiveCharacterTextSplitter.from_language(
+ #             language=language, *splitter_args, **splitter_kwargs
+ #         )
+ #         docs = splitter.create_documents([doc])
+ #         return docs
+ #     except Exception as e:
+ #         raise ValueError(f'Failed to split. Error: {e}')
+ #
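
The helpers in the new `bridge/langchain.py` compose into a load → split → convert pipeline. A minimal usage sketch, assuming `langchain` is installed; the loader name, splitter settings, and `notes.txt` path are illustrative assumptions, not part of this release:

```python
from lionagi.bridge.langchain import (
    from_langchain, langchain_loader, langchain_text_splitter
)

# Load documents through a langchain loader referenced by name;
# "TextLoader" and "notes.txt" are assumptions for this sketch.
docs = langchain_loader("TextLoader", loader_args=["notes.txt"])

# Split the loaded documents with a splitter referenced by name.
chunks = langchain_text_splitter(
    docs,
    "RecursiveCharacterTextSplitter",
    splitter_kwargs={"chunk_size": 500, "chunk_overlap": 50},
)

# Convert a langchain document into a lionagi DataNode.
node = from_langchain(docs[0])
```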
@@ -0,0 +1,157 @@
+ from typing import Union, Callable, List, Any, Dict
+ from ..schema.base_schema import DataNode, T
+ from ..utils.sys_util import change_dict_key
+
+
+ def from_llama_index(llama_node: Any, **kwargs: Any) -> T:
+     """
+     Converts a Llama Index node into a DataNode object.
+
+     Parameters:
+         llama_node (Any): The Llama Index node to be converted.
+
+         **kwargs: Additional keyword arguments for JSON serialization.
+
+     Returns:
+         DataNode: A DataNode object created from the Llama Index node.
+     """
+     llama_dict = llama_node.to_dict(**kwargs)
+     return DataNode.from_dict(llama_dict)
+
+ def to_llama_index_textnode(datanode: T, **kwargs: Any) -> Any:
+     """
+     Converts a DataNode into a Llama Index TextNode.
+
+     Parameters:
+         datanode (DataNode): The DataNode to be converted.
+
+         **kwargs: Additional keyword arguments to be included in the TextNode.
+
+     Returns:
+         TextNode: A Llama Index TextNode created from the DataNode.
+     """
+     # to llama_index textnode
+     from llama_index.schema import TextNode
+
+     dnode = datanode.to_dict()
+     change_dict_key(dnode, old_key='content', new_key='text')
+     change_dict_key(dnode, old_key='node_id', new_key='id_')
+
+     dnode = {**dnode, **kwargs}
+     return TextNode.from_dict(dnode)
+
+ def get_llama_reader(reader: Union[str, Callable]) -> Callable:
+     """
+     Gets a Llama Index reader function.
+
+     Parameters:
+         reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+     Returns:
+         Callable: The Llama Index reader function.
+
+     Raises:
+         ValueError: If the specified reader is invalid.
+     """
+     try:
+         if isinstance(reader, str):
+             if reader == 'SimpleDirectoryReader':
+                 from llama_index import SimpleDirectoryReader
+                 return SimpleDirectoryReader
+             else:
+                 from llama_index import download_loader
+                 return download_loader(reader)
+         else:
+             return reader
+     except Exception as e:
+         raise ValueError(f'Invalid reader: {reader}, Error: {e}')
+
+ def llama_index_reader(reader: Union[str, Callable],
+                        reader_args: List[Any] = [],
+                        reader_kwargs: Dict[str, Any] = {},
+                        load_data_args: List[Any] = [],
+                        load_data_kwargs: Dict[str, Any] = {}) -> List[Any]:
+     """
+     Loads documents using a specified Llama Index reader.
+
+     Parameters:
+         reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+         reader_args (List[Any]): Positional arguments to pass to the reader function.
+
+         reader_kwargs (Dict[str, Any]): Keyword arguments to pass to the reader function.
+
+         load_data_args (List[Any]): Positional arguments for the load_data method.
+
+         load_data_kwargs (Dict[str, Any]): Keyword arguments for the load_data method.
+
+     Returns:
+         List[Any]: A list of documents loaded by the reader.
+
+     Raises:
+         ValueError: If the specified reader is invalid or if the reader fails to load documents.
+     """
+     reader = get_llama_reader(reader)
+
+     try:
+         loader = reader(*reader_args, **reader_kwargs)
+         documents = loader.load_data(*load_data_args, **load_data_kwargs)
+         return documents
+
+     except Exception as e:
+         raise ValueError(f'Failed to read. Error: {e}')
+
+ def get_llama_parser(parser: Union[str, Callable]) -> Callable:
+     import llama_index.node_parser as node_parser
+     import llama_index.text_splitter as text_splitter
+
+     try:
+         return getattr(node_parser, parser)
+     except Exception as e1:
+         try:
+             if isinstance(parser, str):
+                 return getattr(text_splitter, parser)
+             else:
+                 return parser
+         except Exception as e2:
+             raise ValueError(f'Invalid node parser: {parser}. Error: {e1}, {e2}')
+
+
+ def llama_index_node_parser(documents: List[Any],
+                             parser: Union[str, Callable],
+                             parser_args: List[Any] = [],
+                             parser_kwargs: Dict[str, Any] = {},
+                             parsing_kwargs: Dict[str, Any] = {}) -> List[Any]:
+     """
+     Parses documents into nodes using a specified Llama Index node parser.
+
+     Parameters:
+         documents (List[Any]): The documents to parse.
+
+         parser (Union[str, Callable]): The name of the parser function or the parser function itself.
+
+         parser_args (List[Any]): Positional arguments to pass to the parser function.
+
+         parser_kwargs (Dict[str, Any]): Keyword arguments to pass to the parser function.
+
+     Returns:
+         List[Any]: A list of nodes parsed from the documents.
+
+     Raises:
+         ValueError: If the specified parser is invalid or if the parser fails to parse the documents.
+     """
+     parser = get_llama_parser(parser)
+
+     try:
+         parser_obj = parser(*parser_args, **parser_kwargs)
+         nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+         return nodes
+
+     except Exception as e1:
+         try:
+             parser_obj = parser.from_defaults(*parser_args, **parser_kwargs)
+             nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+             return nodes
+         except Exception as e2:
+             raise ValueError(f'Failed to parse. Error: {e1}, {e2}')
+
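
`bridge/llama_index.py` follows the same pattern: resolve a reader by name, load, parse into nodes, then convert to `DataNode`. A minimal sketch, assuming `llama_index` is installed; the `./data` directory and the `SimpleNodeParser` choice are illustrative:

```python
from lionagi.bridge.llama_index import (
    from_llama_index, llama_index_reader, llama_index_node_parser
)

# Read files with a reader referenced by name; "./data" is an
# assumed directory for this sketch.
docs = llama_index_reader("SimpleDirectoryReader", reader_args=["./data"])

# Parse the documents into nodes; the name is resolved against
# llama_index.node_parser, with a from_defaults fallback.
nodes = llama_index_node_parser(docs, "SimpleNodeParser")

# Convert one llama_index node into a lionagi DataNode.
data_node = from_llama_index(nodes[0])
```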
@@ -0,0 +1,7 @@
+ from .oai_configs import oai_schema
+ from .openrouter_config import openrouter_schema
+
+ __all__ = [
+     "oai_schema",
+     "openrouter_schema"
+ ]
@@ -0,0 +1,49 @@
+ oai_chat_llmconfig = {
+     "model": "gpt-4-1106-preview",
+     "frequency_penalty": 0,
+     "max_tokens": None,
+     "n": 1,
+     "presence_penalty": 0,
+     "response_format": {"type": "text"},
+     "seed": None,
+     "stop": None,
+     "stream": False,
+     "temperature": 0.7,
+     "top_p": 1,
+     "tools": None,
+     "tool_choice": "none",
+     "user": None
+ }
+
+ oai_chat_schema = {
+     "required": ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+     "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+     "input": "messages",
+     "config": oai_chat_llmconfig
+ }
+
+ oai_finetune_llmconfig = {
+     "model": "gpt-3.5-turbo",
+     "hyperparameters": {
+         "batch_size": "auto",
+         "learning_rate_multiplier": "auto",
+         "n_epochs": "auto"
+     },
+     "suffix": None,
+     "training_file": None,
+ }
+
+ oai_finetune_schema = {
+     "required": ["model", "training_file"],
+     "optional": ["hyperparameters", "suffix", "validate_file"],
+     "input": ["training_file"],
+     "config": oai_finetune_llmconfig
+ }
+
+
+ oai_schema = {
+     "chat": oai_chat_schema,
+     "finetune": oai_finetune_schema
+ }
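
Each schema bundles default parameters (`config`) with the metadata a service needs: required keys, optional keys, and the name of the input field. A sketch of how a caller might assemble a chat payload from it; the override and message values are illustrative, and the actual consumption happens inside the services layer, whose internals are not shown in this diff:

```python
from lionagi.configs.oai_configs import oai_schema

chat = oai_schema["chat"]

# Start from the schema defaults, overlay per-call overrides, drop
# unset optional keys, and attach the input field the schema names.
# Override and message values here are illustrative only.
config = {**chat["config"], "temperature": 0.2}
payload = {
    chat["input"]: [{"role": "user", "content": "Add 10 and 5."}],
    **{k: v for k, v in config.items() if v is not None},
}
```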
@@ -0,0 +1,49 @@
+ openrouter_chat_llmconfig = {
+     "model": "gpt-4-1106-preview",
+     "frequency_penalty": 0,
+     "max_tokens": None,
+     "n": 1,
+     "presence_penalty": 0,
+     "response_format": {"type": "text"},
+     "seed": None,
+     "stop": None,
+     "stream": False,
+     "temperature": 0.7,
+     "top_p": 1,
+     "tools": None,
+     "tool_choice": "none",
+     "user": None
+ }
+
+ openrouter_chat_schema = {
+     "required": ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+     "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+     "input": "messages",
+     "config": openrouter_chat_llmconfig
+ }
+
+ openrouter_finetune_llmconfig = {
+     "model": "gpt-3.5-turbo",
+     "hyperparameters": {
+         "batch_size": "auto",
+         "learning_rate_multiplier": "auto",
+         "n_epochs": "auto"
+     },
+     "suffix": None,
+     "training_file": None,
+ }
+
+ openrouter_finetune_schema = {
+     "required": ["model", "training_file"],
+     "optional": ["hyperparameters", "suffix", "validate_file"],
+     "input": ["training_file"],
+     "config": openrouter_finetune_llmconfig
+ }
+
+
+ openrouter_schema = {
+     "chat": openrouter_chat_schema,
+     "finetune": openrouter_finetune_schema
+ }
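
The OpenRouter schema is shape-identical to the OpenAI one, so switching providers can reduce to a dictionary lookup, as in this brief illustrative sketch:

```python
from lionagi.configs import oai_schema, openrouter_schema

# Shape-identical schemas make the provider a plain lookup key.
schemas = {"openai": oai_schema, "openrouter": openrouter_schema}
defaults = schemas["openrouter"]["chat"]["config"]
```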
@@ -0,0 +1,15 @@
+ # from .messages import Response, Instruction, System
+ from .conversations import Conversation
+ from .sessions import Session
+
+ # from .instruction_sets import InstructionSet
+ # from .flows.flow import Flow
+
+
+ __all__ = [
+     # "Response",
+     # "Instruction",
+     # "System",
+     "Conversation",
+     "Session",  # "Flow", "InstructionSet"
+ ]
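
With `core` re-exported from the package root (see the `lionagi/__init__.py` hunk above), both classes become importable at top level. A quick check; `Session` usage mirrors the README Quick Start, while `Conversation`'s constructor signature is not shown in this diff, so only its import path is exercised:

```python
import lionagi as li

# Session usage mirrors the README Quick Start.
calculator = li.Session(system="You are a helpful assistant.")

# Conversation is re-exported too; its constructor signature is not
# part of this diff, so we only confirm the import path here.
print(li.Conversation)
```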