lionagi 0.0.208__py3-none-any.whl → 0.0.210__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- lionagi/__init__.py +4 -6
- lionagi/api_service/base_endpoint.py +65 -0
- lionagi/api_service/base_rate_limiter.py +121 -0
- lionagi/api_service/base_service.py +146 -0
- lionagi/api_service/chat_completion.py +6 -0
- lionagi/api_service/embeddings.py +6 -0
- lionagi/api_service/payload_package.py +47 -0
- lionagi/api_service/status_tracker.py +29 -0
- lionagi/core/__init__.py +5 -9
- lionagi/core/branch.py +1191 -0
- lionagi/core/flow.py +423 -0
- lionagi/core/{instruction_set/instruction_set.py → instruction_set.py} +3 -3
- lionagi/core/session.py +872 -0
- lionagi/schema/__init__.py +5 -8
- lionagi/schema/base_schema.py +821 -0
- lionagi/{_services → services}/base_service.py +4 -4
- lionagi/{_services → services}/oai.py +4 -4
- lionagi/structures/graph.py +1 -1
- lionagi/structures/relationship.py +1 -1
- lionagi/structures/structure.py +1 -1
- lionagi/tools/tool_manager.py +0 -163
- lionagi/tools/tool_util.py +2 -1
- lionagi/utils/__init__.py +7 -14
- lionagi/utils/api_util.py +63 -2
- lionagi/utils/core_utils.py +338 -0
- lionagi/utils/sys_util.py +3 -3
- lionagi/version.py +1 -1
- {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/METADATA +28 -29
- lionagi-0.0.210.dist-info/RECORD +56 -0
- lionagi/_services/anthropic.py +0 -79
- lionagi/_services/anyscale.py +0 -0
- lionagi/_services/azure.py +0 -1
- lionagi/_services/bedrock.py +0 -0
- lionagi/_services/everlyai.py +0 -0
- lionagi/_services/gemini.py +0 -0
- lionagi/_services/gpt4all.py +0 -0
- lionagi/_services/huggingface.py +0 -0
- lionagi/_services/litellm.py +0 -33
- lionagi/_services/localai.py +0 -0
- lionagi/_services/openllm.py +0 -0
- lionagi/_services/openrouter.py +0 -44
- lionagi/_services/perplexity.py +0 -0
- lionagi/_services/predibase.py +0 -0
- lionagi/_services/rungpt.py +0 -0
- lionagi/_services/vllm.py +0 -0
- lionagi/_services/xinference.py +0 -0
- lionagi/agents/planner.py +0 -1
- lionagi/agents/prompter.py +0 -1
- lionagi/agents/scorer.py +0 -1
- lionagi/agents/summarizer.py +0 -1
- lionagi/agents/validator.py +0 -1
- lionagi/bridge/__init__.py +0 -22
- lionagi/bridge/langchain.py +0 -195
- lionagi/bridge/llama_index.py +0 -266
- lionagi/core/branch/__init__.py +0 -0
- lionagi/core/branch/branch.py +0 -841
- lionagi/core/branch/cluster.py +0 -1
- lionagi/core/branch/conversation.py +0 -787
- lionagi/core/core_util.py +0 -0
- lionagi/core/flow/__init__.py +0 -0
- lionagi/core/flow/flow.py +0 -19
- lionagi/core/flow/flow_util.py +0 -62
- lionagi/core/instruction_set/__init__.py +0 -0
- lionagi/core/messages/__init__.py +0 -0
- lionagi/core/sessions/__init__.py +0 -0
- lionagi/core/sessions/session.py +0 -504
- lionagi/datastores/__init__.py +0 -1
- lionagi/datastores/chroma.py +0 -1
- lionagi/datastores/deeplake.py +0 -1
- lionagi/datastores/elasticsearch.py +0 -1
- lionagi/datastores/lantern.py +0 -1
- lionagi/datastores/pinecone.py +0 -1
- lionagi/datastores/postgres.py +0 -1
- lionagi/datastores/qdrant.py +0 -1
- lionagi/loaders/__init__.py +0 -18
- lionagi/loaders/chunker.py +0 -166
- lionagi/loaders/load_util.py +0 -240
- lionagi/loaders/reader.py +0 -122
- lionagi/models/__init__.py +0 -0
- lionagi/models/base_model.py +0 -0
- lionagi/models/imodel.py +0 -53
- lionagi/schema/async_queue.py +0 -158
- lionagi/schema/base_condition.py +0 -1
- lionagi/schema/base_node.py +0 -422
- lionagi/schema/base_tool.py +0 -44
- lionagi/schema/data_logger.py +0 -126
- lionagi/schema/data_node.py +0 -88
- lionagi/schema/status_tracker.py +0 -37
- lionagi/tests/test_utils/test_encrypt_util.py +0 -323
- lionagi/utils/encrypt_util.py +0 -283
- lionagi/utils/url_util.py +0 -55
- lionagi-0.0.208.dist-info/RECORD +0 -106
- lionagi/{agents → api_service}/__init__.py +0 -0
- lionagi/core/{branch/branch_manager.py → branch_manager.py} +0 -0
- lionagi/core/{messages/messages.py → messages.py} +3 -3
- /lionagi/{_services → services}/__init__.py +0 -0
- /lionagi/{_services → services}/mistralai.py +0 -0
- /lionagi/{_services → services}/mlx_service.py +0 -0
- /lionagi/{_services → services}/ollama.py +0 -0
- /lionagi/{_services → services}/services.py +0 -0
- /lionagi/{_services → services}/transformers.py +0 -0
- {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/LICENSE +0 -0
- {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/WHEEL +0 -0
- {lionagi-0.0.208.dist-info → lionagi-0.0.210.dist-info}/top_level.txt +0 -0
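Note the package-wide rename of lionagi/_services/ to lionagi/services/ in the listing above: code that imported service modules by the old underscore-prefixed path must update its imports. A minimal compatibility sketch follows (the OpenAIService name is assumed for illustration and is not confirmed by this diff):

try:
    # 0.0.210 layout: the services package no longer carries the underscore prefix.
    from lionagi.services.oai import OpenAIService  # name assumed, not confirmed by this diff
except ImportError:
    # 0.0.208 layout: the old private path.
    from lionagi._services.oai import OpenAIService
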
lionagi/_services/openrouter.py
DELETED
@@ -1,44 +0,0 @@
-from os import getenv
-from ..configs.openrouter_configs import openrouter_schema
-from .base_service import BaseService, PayloadCreation
-
-class OpenRouterService(BaseService):
-    base_url = "https://openrouter.ai/api/v1/"
-    available_endpoints = ['chat/completions']
-    schema = openrouter_schema
-    key_scheme = "OPENROUTER_API_KEY"
-    token_encoding_name = "cl100k_base"
-
-
-    def __init__(self, api_key = None, key_scheme = None,schema = None, token_encoding_name: str = "cl100k_base", **kwargs):
-        key_scheme = key_scheme or self.key_scheme
-        super().__init__(
-            api_key = api_key or getenv(key_scheme),
-            schema = schema or self.schema,
-            token_encoding_name=token_encoding_name, **kwargs
-        )
-        self.active_endpoint = []
-
-    async def serve(self, input_, endpoint="chat/completions", method="post", **kwargs):
-        if endpoint not in self.active_endpoint:
-            await self. init_endpoint(endpoint)
-        if endpoint == "chat/completions":
-            return await self.serve_chat(input_, **kwargs)
-        else:
-            return ValueError(f'{endpoint} is currently not supported')
-
-    async def serve_chat(self, messages, **kwargs):
-        endpoint = "chat/completions"
-
-        if endpoint not in self.active_endpoint:
-            await self. init_endpoint(endpoint)
-            self.active_endpoint.append(endpoint)
-        payload = PayloadCreation.chat_completion(
-            messages, self.endpoints[endpoint].config, self.schema[endpoint], **kwargs)
-
-        try:
-            completion = await self.call_api(payload, endpoint, "post")
-            return payload, completion
-        except Exception as e:
-            self.status_tracker.num_tasks_failed += 1
-            raise e
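
For reference, the removed OpenRouterService was driven through serve_chat, which returns the request payload together with the completion, and reads OPENROUTER_API_KEY from the environment. A minimal usage sketch against the 0.0.208 layout (the message format is assumed to follow the OpenAI chat schema, which this diff does not confirm):

import asyncio
from lionagi._services.openrouter import OpenRouterService  # 0.0.208 path

async def main():
    # The constructor falls back to the OPENROUTER_API_KEY environment variable.
    service = OpenRouterService()
    # Assumed OpenAI-style chat messages; not confirmed by this diff.
    messages = [{"role": "user", "content": "Hello"}]
    payload, completion = await service.serve_chat(messages)
    print(completion)

asyncio.run(main())
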
lionagi/_services/perplexity.py
DELETED
File without changes

lionagi/_services/predibase.py
DELETED
File without changes

lionagi/_services/rungpt.py
DELETED
File without changes

lionagi/_services/vllm.py
DELETED
File without changes

lionagi/_services/xinference.py
DELETED
File without changes

lionagi/agents/planner.py
DELETED
@@ -1 +0,0 @@
-# TODO

lionagi/agents/prompter.py
DELETED
@@ -1 +0,0 @@
-# TODO

lionagi/agents/scorer.py
DELETED
@@ -1 +0,0 @@
-# TODO

lionagi/agents/summarizer.py
DELETED
@@ -1 +0,0 @@
-# TODO

lionagi/agents/validator.py
DELETED
@@ -1 +0,0 @@
-# TODO

lionagi/bridge/__init__.py
DELETED
@@ -1,22 +0,0 @@
-from .langchain import(
-    from_langchain, to_langchain_document, langchain_loader,
-    langchain_loader, langchain_text_splitter
-)
-
-from .llama_index import (
-    from_llama_index, to_llama_index_textnode, get_llama_reader,
-    llama_index_reader, get_llama_parser, llama_index_node_parser
-)
-
-__all__ = [
-    'from_langchain',
-    'to_langchain_document',
-    'langchain_loader',
-    'from_llama_index',
-    'to_llama_index_textnode',
-    'get_llama_reader',
-    'llama_index_reader',
-    'get_llama_parser',
-    'llama_index_node_parser',
-    'langchain_text_splitter'
-]
lionagi/bridge/langchain.py
DELETED
@@ -1,195 +0,0 @@
-from typing import Union, Callable, List, Dict, Any, TypeVar
-from ..utils.sys_util import change_dict_key, install_import
-from ..schema.data_node import DataNode
-
-
-T = TypeVar('T', bound='DataNode')
-
-def from_langchain(lc_doc: Any) -> T:
-    """
-    Converts a langchain document into a DataNode object.
-
-    Args:
-        lc_doc (Any): The langchain document to be converted.
-
-    Returns:
-        T: A DataNode object created from the langchain document.
-
-    Examples:
-        >>> lc_doc = LangchainDocument(...)
-        >>> data_node = from_langchain(lc_doc)
-        >>> isinstance(data_node, DataNode)
-        True
-    """
-    info_json = lc_doc.to_json()
-    info_node = {'lc_id': info_json['id']}
-    info_node = {**info_node, **info_json['kwargs']}
-    return DataNode(**info_node)
-
-def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
-    """
-    Converts a DataNode into a langchain Document.
-
-    Args:
-        datanode (T): The DataNode to be converted.
-        **kwargs: Additional keyword arguments to be included in the Document.
-
-    Returns:
-        Any: A langchain Document created from the DataNode.
-
-    Examples:
-        >>> data_node = DataNode(...)
-        >>> lc_document = to_langchain_document(data_node, author="John Doe")
-        >>> isinstance(lc_document, LangchainDocument)
-        True
-    """
-    try:
-        from langchain.schema import Document
-    except ImportError:
-        try:
-            install_import(
-                package_name='langchain',
-                module_name='schema',
-                import_name='Document',
-            )
-            from langchain.schema import Document
-        except Exception as e:
-            raise ImportError(f'Unable to import required module from langchain. Please make sure that langchain is installed. Error: {e}')
-
-    dnode = datanode.to_dict()
-    change_dict_key(dnode, old_key='content', new_key='page_content')
-    change_dict_key(dnode, old_key='lc_id', new_key='id_')
-    dnode = {**dnode, **kwargs}
-    return Document(**dnode)
-
-def langchain_loader(loader: Union[str, Callable],
-                     loader_args: List[Any] = [],
-                     loader_kwargs: Dict[str, Any] = {}) -> Any:
-    """
-    Loads data using a specified langchain loader.
-
-    Args:
-        loader (Union[str, Callable]): The name of the loader function or the loader function itself.
-        loader_args (List[Any]): Positional arguments to pass to the loader function.
-        loader_kwargs (Dict[str, Any]): Keyword arguments to pass to the loader function.
-
-    Returns:
-        Any: The data loaded by the loader function.
-
-    Raises:
-        ValueError: If the specified loader is invalid or if the loader fails to load data.
-
-    Examples:
-        >>> data = langchain_loader("json_loader", loader_args=["data.json"])
-        >>> isinstance(data, dict)
-        True
-    """
-    try:
-        import langchain.document_loaders as document_loaders
-    except ImportError:
-        try:
-            install_import(
-                package_name='langchain',
-                module_name='document_loaders',
-            )
-            import langchain.document_loaders as document_loaders
-        except Exception as e:
-            raise ImportError(f'Unable to import required module from langchain. Please make sure that langchain is installed. Error: {e}')
-
-    try:
-        if isinstance(loader, str):
-            try:
-                loader = getattr(document_loaders, loader)
-            except ImportError as e:
-                raise ValueError(f'Unable to import {loader} from langchain.document_loaders. Some dependency of LangChain are not installed. Error: {e}')
-        else:
-            loader = loader
-    except Exception as e:
-        raise ValueError(f'Invalid loader: {loader}. Error: {e}')
-
-    try:
-        loader_obj = loader(*loader_args, **loader_kwargs)
-        data = loader_obj.load()
-        return data
-    except Exception as e:
-        raise ValueError(f'Failed to load. Error: {e}')
-
-def langchain_text_splitter(data: Union[str, List],
-                            splitter: Union[str, Callable],
-                            splitter_args: List[Any] = [],
-                            splitter_kwargs: Dict[str, Any] = {}) -> List[str]:
-    """
-    Splits text or a list of documents using a specified langchain text splitter.
-
-    Args:
-        data (Union[str, List]): The input text or list of documents to be split.
-        splitter (Union[str, Callable]): The name of the text splitter function or the function itself.
-        splitter_args (List[Any]): Positional arguments to pass to the splitter function.
-        splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter function.
-
-    Returns:
-        List[str]: A list of chunks obtained by splitting the input.
-
-    Raises:
-        ValueError: If the specified text splitter is invalid or if the splitting fails.
-    """
-
-    try:
-        import langchain.text_splitter as text_splitter
-    except ImportError:
-        try:
-            install_import(
-                package_name='langchain',
-                module_name='text_splitter'
-            )
-            import langchain.text_splitter as text_splitter
-        except Exception as e:
-            raise ImportError(f'Unable to import required module from langchain. Please make sure that langchain is installed. Error: {e}')
-
-    try:
-        if isinstance(splitter, str):
-            splitter = getattr(text_splitter, splitter)
-        else:
-            splitter = splitter
-    except Exception as e:
-        raise ValueError(f'Invalid text splitter: {splitter}. Error: {e}')
-
-    try:
-        splitter_obj = splitter(*splitter_args, **splitter_kwargs)
-        if isinstance(data, str):
-            chunk = splitter_obj.split_text(data)
-        else:
-            chunk = splitter_obj.split_documents(data)
-        return chunk
-    except Exception as e:
-        raise ValueError(f'Failed to split. Error: {e}')
-
-# def langchain_code_splitter(doc: str,
-#                             language: str,
-#                             splitter_args: List[Any] = [],
-#                             splitter_kwargs: Dict[str, Any] = {}) -> List[Any]:
-#     """
-#     Splits code into smaller chunks using a RecursiveCharacterTextSplitter specific to a language.
-#
-#     Parameters:
-#         doc (str): The code document to be split.
-#         language (str): The programming language of the code.
-#         splitter_args (List[Any]): Positional arguments to pass to the splitter.
-#         splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter.
-#
-#     Returns:
-#         List[Any]: A list of Documents, each representing a chunk of the original code.
-#
-#     Raises:
-#         ValueError: If the splitter fails to split the code document.
-#     """
-#     from langchain.text_splitter import RecursiveCharacterTextSplitter
-#
-#     try:
-#         splitter = RecursiveCharacterTextSplitter.from_language(
-#             language=language, *splitter_args, **splitter_kwargs
-#         )
-#         docs = splitter.create_documents([doc])
-#         return docs
-#     except Exception as e:
-#         raise ValueError(f'Failed to split. Error: {e}')
lionagi/bridge/llama_index.py
DELETED
@@ -1,266 +0,0 @@
-from typing import Union, Callable, List, Any, Dict, TypeVar
-from ..utils.sys_util import change_dict_key, install_import, is_package_installed
-from ..schema.data_node import DataNode
-
-
-T = TypeVar('T', bound='DataNode')
-
-def from_llama_index(llama_node: Any, **kwargs: Any) -> T:
-    """
-    Converts a Llama Index node into a DataNode object.
-
-    Args:
-        llama_node (Any): The Llama Index node to be converted.
-        **kwargs: Additional keyword arguments for JSON serialization.
-
-    Returns:
-        T: A DataNode object created from the Llama Index node.
-
-    Example:
-        llama_node = LlamaIndexNode(...)
-        datanode = from_llama_index(llama_node, serialize_dates=True)
-    """
-    llama_dict = llama_node.to_dict(**kwargs)
-    return DataNode.from_dict(llama_dict)
-
-def to_llama_index_textnode(datanode: T, **kwargs: Any) -> Any:
-    """
-    Converts a DataNode into a Llama Index TextNode.
-
-    Args:
-        datanode (T): The DataNode to be converted.
-        **kwargs: Additional keyword arguments to be included in the TextNode.
-
-    Returns:
-        Any: A Llama Index TextNode created from the DataNode.
-
-    Example:
-        datanode = DataNode(...)
-        textnode = to_llama_index_textnode(datanode, additional_arg=1)
-    """
-    try:
-        from llama_index.schema import TextNode
-    except ImportError:
-        try:
-            install_import(
-                package_name='llama_index',
-                module_name='schema',
-                import_name='TextNode'
-            )
-            from llama_index.schema import TextNode
-        except Exception as e:
-            raise ImportError(f'Unable to import required module from llama_index. Please make sure that llama_index is installed. Error: {e}')
-
-    dnode = datanode.to_dict()
-    change_dict_key(dnode, old_key='content', new_key='text')
-    change_dict_key(dnode, old_key='node_id', new_key='id_')
-    dnode['text'] = str(dnode['text'])
-
-    dnode = {**dnode, **kwargs}
-    return TextNode.from_dict(dnode)
-
-def get_llama_reader(reader: Union[str, Callable]) -> Callable:
-    """
-    Gets a Llama Index reader function.
-
-    Args:
-        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
-
-    Returns:
-        Callable: The Llama Index reader function.
-
-    Raises:
-        ValueError: If the specified reader is invalid.
-
-    Example:
-        reader = get_llama_reader("SimpleDirectoryReader")
-        # or for a custom function
-        def custom_reader(): pass
-        reader = get_llama_reader(custom_reader)
-    """
-
-    try:
-        if isinstance(reader, str):
-            if reader == 'SimpleDirectoryReader':
-                try:
-                    from llama_index import SimpleDirectoryReader
-                    return SimpleDirectoryReader
-                except ImportError or ModuleNotFoundError:
-                    try:
-                        install_import(
-                            package_name='llama_index',
-                            import_name='SimpleDirectoryReader'
-                        )
-                        from llama_index import SimpleDirectoryReader
-                        return SimpleDirectoryReader
-                    except Exception as e:
-                        raise ImportError(f'Failed to import SimpleDirectoryReader. Error: {e}')
-            else:
-                try:
-                    from llama_index import download_loader
-                    return download_loader(reader)
-                except ImportError:
-                    try:
-                        install_import(
-                            package_name='llama_index',
-                            import_name='download_loader'
-                        )
-                        return download_loader(reader)
-                    except Exception as e:
-                        raise ImportError(f'Failed to import download_loader from LlamaIndex. Error: {e}')
-        else:
-            return reader
-    except Exception as e:
-        raise ValueError(f'Invalid reader: {reader}, Error: {e}')
-
-def llama_index_reader(reader: Union[str, Callable],
-                       reader_args: List[Any] = [],
-                       reader_kwargs: Dict[str, Any] = {},
-                       load_data_args: List[Any] = [],
-                       load_data_kwargs: Dict[str, Any] = {}) -> List[Any]:
-    """
-    Loads documents using a specified Llama Index reader.
-
-    Args:
-        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
-        reader_args (List[Any]): Positional arguments to pass to the reader function.
-        reader_kwargs (Dict[str, Any]): Keyword arguments to pass to the reader function.
-        load_data_args (List[Any]): Positional arguments for the load_data method.
-        load_data_kwargs (Dict[str, Any]): Keyword arguments for the load_data method.
-
-    Returns:
-        List[Any]: A list of documents loaded by the reader.
-
-    Raises:
-        ValueError: If the specified reader is invalid or if the reader fails to load documents.
-
-    Example:
-        documents = llama_index_reader("SimpleDirectoryReader", reader_args=["/path/to/data"])
-    """
-    reader = get_llama_reader(reader)
-
-    try:
-        loader = reader(*reader_args, **reader_kwargs)
-        documents = loader.load_data(*load_data_args, **load_data_kwargs)
-        return documents
-
-    except Exception as e:
-        raise ValueError(f'Failed to read. Error: {e}')
-
-def get_llama_parser(parser: Union[str, Callable]) -> Callable:
-    """
-    Gets a Llama Index parser function or object.
-
-    Args:
-        parser (Union[str, Callable]): The name of the parser function or the parser function itself.
-
-    Returns:
-        Callable: The Llama Index parser function or object.
-
-    Raises:
-        ValueError: If the specified parser is invalid.
-
-    Example:
-        parser = get_llama_parser("DefaultNodeParser")
-        # or for a custom function
-        def custom_parser(): pass
-        parser = get_llama_parser(custom_parser)
-    """
-
-    try:
-        import llama_index.node_parser as node_parser
-    except ImportError:
-        try:
-            install_import(
-                package_name='llama_index',
-                module_name='node_parser'
-            )
-            import llama_index.node_parser as node_parser
-        except ImportError:
-            raise ImportError('Failed to import Llama Index. Please install Llama Index to use this function.')
-        except Exception as e:
-            raise ValueError(f'Invalid node parser: {parser}. Error: {e}')
-
-    try:
-        import llama_index.text_splitter as text_splitter
-    except ImportError:
-        try:
-            install_import(
-                package_name='llama_index',
-                module_name='text_splitter'
-            )
-            import llama_index.text_splitter as text_splitter
-        except ImportError:
-            raise ImportError('Failed to import Llama Index. Please install Llama Index to use this function.')
-
-    try:
-        if parser == 'CodeSplitter':
-            if not is_package_installed('tree_sitter_languages'):
-                install_import(package_name='tree_sitter_languages')
-
-        a = getattr(node_parser, parser)
-        if a is not None:
-            return a
-        else:
-            raise ImportError(f'Failed to import {parser} from Llama Index.')
-    except Exception as e1:
-        try:
-            if isinstance(parser, str):
-                return getattr(text_splitter, parser)
-            else:
-                return parser
-        except Exception as e2:
-            raise ValueError(f'Invalid node parser: {parser}. Error: {e1}, {e2}')
-
-
-def llama_index_node_parser(documents: List[Any],
-                            parser: Union[str, Callable],
-                            parser_args: List[Any] = [],
-                            parser_kwargs: Dict[str, Any] = {},
-                            parsing_kwargs: Dict[str, Any] = {}) -> List[Any]:
-    """
-    Parses documents into nodes using a specified Llama Index node parser.
-
-    Args:
-        documents (List[Any]): The documents to parse.
-        parser (Union[str, Callable]): The name of the parser function or the parser function itself.
-        parser_args (List[Any]): Positional arguments to pass to the parser function.
-        parser_kwargs (Dict[str, Any]): Keyword arguments to pass to the parser function.
-        parsing_kwargs (Dict[str, Any]): Keyword arguments for the parsing process.
-
-    Returns:
-        List[Any]: A list of nodes parsed from the documents.
-
-    Raises:
-        ValueError: If the specified parser is invalid or if the parser fails to parse the documents.
-
-    Example:
-        nodes = llama_index_node_parser(documents, "DefaultNodeParser")
-    """
-
-    try:
-        parser = get_llama_parser(parser)
-        parser_obj = parser(*parser_args, **parser_kwargs)
-        nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
-        return nodes
-
-    except ImportError as e:
-        module_name = str(e).split("\'")[-2]
-        try:
-            install_import(package_name=module_name)
-            parser = get_llama_parser(parser)
-            parser_obj = parser(*parser_args, **parser_kwargs)
-            nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
-            return nodes
-        except Exception as e:
-            raise ImportError(f'Failed to install and import {module_name}. Error: {e}')
-
-
-    except Exception as e1:
-        try:
-            parser_obj = parser.from_defaults(*parser_args, **parser_kwargs)
-            nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
-            return nodes
-        except Exception as e2:
-            raise ValueError(f'Failed to parse. Error: {e1}, {e2}')
-
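
For reference, the removed llama_index bridge wrapped reader construction and node parsing behind two helpers. A minimal sketch mirroring the removed docstrings (assumes llama_index is installed; SentenceSplitter is an assumption about the installed llama_index version, since the docstring's "DefaultNodeParser" name is not verified here):

from lionagi.bridge.llama_index import llama_index_reader, llama_index_node_parser  # 0.0.208 paths

# Load documents with a named reader, as in the removed docstring example.
documents = llama_index_reader("SimpleDirectoryReader", reader_args=["/path/to/data"])

# Split the documents into nodes; the parser name is an assumption.
nodes = llama_index_node_parser(documents, "SentenceSplitter")
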
lionagi/core/branch/__init__.py
DELETED
File without changes