lionagi 0.0.112__py3-none-any.whl → 0.0.113__py3-none-any.whl

Files changed (94)
  1. lionagi/__init__.py +3 -3
  2. lionagi/bridge/__init__.py +7 -0
  3. lionagi/bridge/langchain.py +131 -0
  4. lionagi/bridge/llama_index.py +157 -0
  5. lionagi/configs/__init__.py +7 -0
  6. lionagi/configs/oai_configs.py +49 -0
  7. lionagi/configs/openrouter_config.py +49 -0
  8. lionagi/core/__init__.py +8 -2
  9. lionagi/core/instruction_sets.py +1 -3
  10. lionagi/core/messages.py +2 -2
  11. lionagi/core/sessions.py +174 -27
  12. lionagi/datastore/__init__.py +1 -0
  13. lionagi/loader/__init__.py +9 -4
  14. lionagi/loader/chunker.py +157 -0
  15. lionagi/loader/reader.py +124 -0
  16. lionagi/objs/__init__.py +7 -0
  17. lionagi/objs/messenger.py +163 -0
  18. lionagi/objs/tool_registry.py +247 -0
  19. lionagi/schema/__init__.py +11 -0
  20. lionagi/schema/base_schema.py +239 -0
  21. lionagi/schema/base_tool.py +9 -0
  22. lionagi/schema/data_logger.py +94 -0
  23. lionagi/services/__init__.py +14 -0
  24. lionagi/{service_/oai.py → services/base_api_service.py} +49 -82
  25. lionagi/{endpoint/base_endpoint.py → services/chatcompletion.py} +19 -22
  26. lionagi/services/oai.py +34 -0
  27. lionagi/services/openrouter.py +32 -0
  28. lionagi/{service_/service_utils.py → services/service_objs.py} +0 -1
  29. lionagi/structure/__init__.py +7 -0
  30. lionagi/structure/relationship.py +128 -0
  31. lionagi/structure/structure.py +160 -0
  32. lionagi/tests/test_flatten_util.py +426 -0
  33. lionagi/tools/__init__.py +0 -5
  34. lionagi/tools/coder.py +1 -0
  35. lionagi/tools/scorer.py +1 -0
  36. lionagi/tools/validator.py +1 -0
  37. lionagi/utils/__init__.py +46 -20
  38. lionagi/utils/api_util.py +86 -0
  39. lionagi/utils/call_util.py +347 -0
  40. lionagi/utils/flat_util.py +540 -0
  41. lionagi/utils/io_util.py +102 -0
  42. lionagi/utils/load_utils.py +190 -0
  43. lionagi/utils/sys_util.py +191 -0
  44. lionagi/utils/tool_util.py +92 -0
  45. lionagi/utils/type_util.py +81 -0
  46. lionagi/version.py +1 -1
  47. {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/METADATA +37 -13
  48. lionagi-0.0.113.dist-info/RECORD +84 -0
  49. lionagi/endpoint/chat_completion.py +0 -20
  50. lionagi/endpoint/endpoint_utils.py +0 -0
  51. lionagi/llm_configs.py +0 -21
  52. lionagi/loader/load_utils.py +0 -161
  53. lionagi/schema.py +0 -275
  54. lionagi/service_/__init__.py +0 -6
  55. lionagi/service_/base_service.py +0 -48
  56. lionagi/service_/openrouter.py +0 -1
  57. lionagi/services.py +0 -1
  58. lionagi/tools/tool_utils.py +0 -75
  59. lionagi/utils/sys_utils.py +0 -799
  60. lionagi-0.0.112.dist-info/RECORD +0 -67
  61. /lionagi/{core/responses.py → datastore/chroma.py} +0 -0
  62. /lionagi/{endpoint/assistants.py → datastore/deeplake.py} +0 -0
  63. /lionagi/{endpoint/audio.py → datastore/elasticsearch.py} +0 -0
  64. /lionagi/{endpoint/embeddings.py → datastore/lantern.py} +0 -0
  65. /lionagi/{endpoint/files.py → datastore/pinecone.py} +0 -0
  66. /lionagi/{endpoint/fine_tuning.py → datastore/postgres.py} +0 -0
  67. /lionagi/{endpoint/images.py → datastore/qdrant.py} +0 -0
  68. /lionagi/{endpoint/messages.py → schema/base_condition.py} +0 -0
  69. /lionagi/{service_ → services}/anthropic.py +0 -0
  70. /lionagi/{service_ → services}/anyscale.py +0 -0
  71. /lionagi/{service_ → services}/azure.py +0 -0
  72. /lionagi/{service_ → services}/bedrock.py +0 -0
  73. /lionagi/{service_ → services}/everlyai.py +0 -0
  74. /lionagi/{service_ → services}/gemini.py +0 -0
  75. /lionagi/{service_ → services}/gpt4all.py +0 -0
  76. /lionagi/{service_ → services}/huggingface.py +0 -0
  77. /lionagi/{service_ → services}/litellm.py +0 -0
  78. /lionagi/{service_ → services}/localai.py +0 -0
  79. /lionagi/{service_ → services}/mistralai.py +0 -0
  80. /lionagi/{service_ → services}/ollama.py +0 -0
  81. /lionagi/{service_ → services}/openllm.py +0 -0
  82. /lionagi/{service_ → services}/perplexity.py +0 -0
  83. /lionagi/{service_ → services}/predibase.py +0 -0
  84. /lionagi/{service_ → services}/rungpt.py +0 -0
  85. /lionagi/{service_ → services}/vllm.py +0 -0
  86. /lionagi/{service_ → services}/xinference.py +0 -0
  87. /lionagi/{endpoint → tests}/__init__.py +0 -0
  88. /lionagi/{endpoint/models.py → tools/planner.py} +0 -0
  89. /lionagi/{endpoint/moderations.py → tools/prompter.py} +0 -0
  90. /lionagi/{endpoint/runs.py → tools/sandbox.py} +0 -0
  91. /lionagi/{endpoint/threads.py → tools/summarizer.py} +0 -0
  92. {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/LICENSE +0 -0
  93. {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/WHEEL +0 -0
  94. {lionagi-0.0.112.dist-info → lionagi-0.0.113.dist-info}/top_level.txt +0 -0
lionagi/__init__.py CHANGED
@@ -18,10 +18,10 @@ import logging
 from .version import __version__
 
 from .utils import *
-from .endpoint import *
-from .loader import *
-from .service_ import *
+from .schema import *
+from .structure import *
 from .core import *
+from .objs import *
 # from .datastore import *
 # from .structure import *
 
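With the wildcard imports switched over, the package's public surface now comes from schema, structure, core, and objs instead of endpoint, loader, and service_. A hedged sketch of what resolves at top level (names taken from the core/__init__.py __all__ shown later in this diff):

    import lionagi

    # Conversation and Session are re-exported through `from .core import *`;
    # the schema/structure/objs names follow from their own __all__ lists.
    print(lionagi.Session, lionagi.Conversation)

lionagi/bridge/__init__.py ADDED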
@@ -0,0 +1,7 @@
+from .langchain import from_langchain
+from .llama_index import from_llama_index
+
+__all__ = [
+    "from_langchain",
+    "from_llama_index"
+]
lionagi/bridge/langchain.py ADDED
@@ -0,0 +1,131 @@
+from typing import Union, Callable, List, Dict, Any
+from ..schema.base_schema import T, DataNode
+from ..utils.sys_util import change_dict_key
+
+
+def from_langchain(lc_doc: Any) -> T:
+    """
+    Converts a langchain document into a DataNode object.
+
+    Parameters:
+        lc_doc (Any): The langchain document to be converted.
+
+    Returns:
+        DataNode: A DataNode object created from the langchain document.
+    """
+    info_json = lc_doc.to_json()
+    info_node = {'lc_id': info_json['id']}
+    info_node = {**info_node, **info_json['kwargs']}
+    return DataNode(**info_node)
+
+def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
+    """
+    Converts a DataNode into a langchain Document.
+
+    Parameters:
+        datanode (DataNode): The DataNode to be converted.
+
+        **kwargs: Additional keyword arguments to be included in the Document.
+
+    Returns:
+        Any: A langchain Document created from the DataNode.
+    """
+    from langchain.schema import Document
+
+    dnode = datanode.to_dict()
+    change_dict_key(dnode, old_key='content', new_key='page_content')
+    change_dict_key(dnode, old_key='lc_id', new_key='id_')
+    dnode = {**dnode, **kwargs}
+    return Document(**dnode)
+
+def langchain_loader(loader: Union[str, Callable],
+                     loader_args: List[Any] = [],
+                     loader_kwargs: Dict[str, Any] = {}) -> Any:
+    """
+    Loads data using a specified langchain loader.
+
+    Parameters:
+        loader (Union[str, Callable]): The name of the loader function or the loader function itself.
+
+        loader_args (List[Any]): Positional arguments to pass to the loader function.
+
+        loader_kwargs (Dict[str, Any]): Keyword arguments to pass to the loader function.
+
+    Returns:
+        Any: The data loaded by the loader function.
+
+    Raises:
+        ValueError: If the specified loader is invalid or if the loader fails to load data.
+    """
+    import langchain.document_loaders as document_loaders
+
+    try:
+        if isinstance(loader, str):
+            loader = getattr(document_loaders, loader)
+        else:
+            loader = loader
+    except Exception as e:
+        raise ValueError(f'Invalid loader: {loader}. Error: {e}')
+
+    try:
+        loader_obj = loader(*loader_args, **loader_kwargs)
+        data = loader_obj.load()
+        return data
+    except Exception as e:
+        raise ValueError(f'Failed to load. Error: {e}')
+
+def langchain_text_splitter(data: Union[str, List],
+                            splitter: Union[str, Callable],
+                            splitter_args: List[Any] = [],
+                            splitter_kwargs: Dict[str, Any] = {}) -> List[str]:
+
+    import langchain.text_splitter as text_splitter
+
+    try:
+        if isinstance(splitter, str):
+            splitter = getattr(text_splitter, splitter)
+        else:
+            splitter = splitter
+    except Exception as e:
+        raise ValueError(f'Invalid text splitter: {splitter}. Error: {e}')
+
+    try:
+        splitter_obj = splitter(*splitter_args, **splitter_kwargs)
+        if isinstance(data, str):
+            chunk = splitter_obj.split_text(data)
+        else:
+            chunk = splitter_obj.split_documents(data)
+        return chunk
+    except Exception as e:
+        raise ValueError(f'Failed to split. Error: {e}')
+
+# def langchain_code_splitter(doc: str,
+#                             language: str,
+#                             splitter_args: List[Any] = [],
+#                             splitter_kwargs: Dict[str, Any] = {}) -> List[Any]:
+#     """
+#     Splits code into smaller chunks using a RecursiveCharacterTextSplitter specific to a language.
+#
+#     Parameters:
+#         doc (str): The code document to be split.
+#         language (str): The programming language of the code.
+#         splitter_args (List[Any]): Positional arguments to pass to the splitter.
+#         splitter_kwargs (Dict[str, Any]): Keyword arguments to pass to the splitter.
+#
+#     Returns:
+#         List[Any]: A list of Documents, each representing a chunk of the original code.
+#
+#     Raises:
+#         ValueError: If the splitter fails to split the code document.
+#     """
+#     from langchain.text_splitter import RecursiveCharacterTextSplitter
+#
+#     try:
+#         splitter = RecursiveCharacterTextSplitter.from_language(
+#             language=language, *splitter_args, **splitter_kwargs
+#         )
+#         docs = splitter.create_documents([doc])
+#         return docs
+#     except Exception as e:
+#         raise ValueError(f'Failed to split. Error: {e}')
+#
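A minimal usage sketch of the two converters above (hypothetical values; assumes the legacy `langchain.schema.Document` API that the diff itself imports):

    from langchain.schema import Document
    from lionagi.bridge.langchain import from_langchain, to_langchain_document

    # langchain -> lionagi: to_json() exposes the serialized class path under
    # 'id' (stored as 'lc_id') and the constructor kwargs (page_content, metadata).
    lc_doc = Document(page_content="hello world", metadata={"source": "demo.txt"})
    node = from_langchain(lc_doc)

    # lionagi -> langchain: 'content' is renamed to 'page_content' and 'lc_id'
    # to 'id_' before the dict is splatted into Document(**dnode).
    lc_doc2 = to_langchain_document(node)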
lionagi/bridge/llama_index.py ADDED
@@ -0,0 +1,157 @@
+from typing import Union, Callable, List, Any, Dict
+from ..schema.base_schema import DataNode, T
+from ..utils.sys_util import change_dict_key
+
+
+def from_llama_index(llama_node: Any, **kwargs: Any) -> T:
+    """
+    Converts a Llama Index node into a DataNode object.
+
+    Parameters:
+        llama_node (Any): The Llama Index node to be converted.
+
+        **kwargs: Additional keyword arguments for JSON serialization.
+
+    Returns:
+        DataNode: A DataNode object created from the Llama Index node.
+    """
+    llama_dict = llama_node.to_dict(**kwargs)
+    return DataNode.from_dict(llama_dict)
+
+def to_llama_index_textnode(datanode: T, **kwargs: Any) -> Any:
+    """
+    Converts a DataNode into a Llama Index TextNode.
+
+    Parameters:
+        datanode (DataNode): The DataNode to be converted.
+
+        **kwargs: Additional keyword arguments to be included in the TextNode.
+
+    Returns:
+        TextNode: A Llama Index TextNode created from the DataNode.
+    """
+    # to llama_index textnode
+    from llama_index.schema import TextNode
+
+    dnode = datanode.to_dict()
+    change_dict_key(dnode, old_key='content', new_key='text')
+    change_dict_key(dnode, old_key='node_id', new_key='id_')
+
+    dnode = {**dnode, **kwargs}
+    return TextNode.from_dict(dnode)
+
+def get_llama_reader(reader: Union[str, Callable]) -> Callable:
+    """
+    Gets a Llama Index reader function.
+
+    Parameters:
+        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+    Returns:
+        Callable: The Llama Index reader function.
+
+    Raises:
+        ValueError: If the specified reader is invalid.
+    """
+    try:
+        if isinstance(reader, str):
+            if reader == 'SimpleDirectoryReader':
+                from llama_index import SimpleDirectoryReader
+                return SimpleDirectoryReader
+            else:
+                from llama_index import download_loader
+                return download_loader(reader)
+        else:
+            return reader
+    except Exception as e:
+        raise ValueError(f'Invalid reader: {reader}, Error: {e}')
+
+def llama_index_reader(reader: Union[str, Callable],
+                       reader_args: List[Any] = [],
+                       reader_kwargs: Dict[str, Any] = {},
+                       load_data_args: List[Any] = [],
+                       load_data_kwargs: Dict[str, Any] = {}) -> List[Any]:
+    """
+    Loads documents using a specified Llama Index reader.
+
+    Parameters:
+        reader (Union[str, Callable]): The name of the reader function or the reader function itself.
+
+        reader_args (List[Any]): Positional arguments to pass to the reader function.
+
+        reader_kwargs (Dict[str, Any]): Keyword arguments to pass to the reader function.
+
+        load_data_args (List[Any]): Positional arguments for the load_data method.
+
+        load_data_kwargs (Dict[str, Any]): Keyword arguments for the load_data method.
+
+    Returns:
+        List[Any]: A list of documents loaded by the reader.
+
+    Raises:
+        ValueError: If the specified reader is invalid or if the reader fails to load documents.
+    """
+    reader = get_llama_reader(reader)
+
+    try:
+        loader = reader(*reader_args, **reader_kwargs)
+        documents = loader.load_data(*load_data_args, **load_data_kwargs)
+        return documents
+
+    except Exception as e:
+        raise ValueError(f'Failed to read. Error: {e}')
+
+def get_llama_parser(parser: Union[str, Callable]) -> Callable:
+    import llama_index.node_parser as node_parser
+    import llama_index.text_splitter as text_splitter
+
+    try:
+        return getattr(node_parser, parser)
+    except Exception as e1:
+        try:
+            if isinstance(parser, str):
+                return getattr(text_splitter, parser)
+            else:
+                return parser
+        except Exception as e2:
+            raise ValueError(f'Invalid node parser: {parser}. Error: {e1}, {e2}')
+
+
+def llama_index_node_parser(documents: List[Any],
+                            parser: Union[str, Callable],
+                            parser_args: List[Any] = [],
+                            parser_kwargs: Dict[str, Any] = {},
+                            parsing_kwargs: Dict[str, Any] = {}) -> List[Any]:
+    """
+    Parses documents into nodes using a specified Llama Index node parser.
+
+    Parameters:
+        documents (List[Any]): The documents to parse.
+
+        parser (Union[str, Callable]): The name of the parser function or the parser function itself.
+
+        parser_args (List[Any]): Positional arguments to pass to the parser function.
+
+        parser_kwargs (Dict[str, Any]): Keyword arguments to pass to the parser function.
+
+    Returns:
+        List[Any]: A list of nodes parsed from the documents.
+
+    Raises:
+        ValueError: If the specified parser is invalid or if the parser fails to parse the documents.
+    """
+    parser = get_llama_parser(parser)
+
+    try:
+        parser_obj = parser(*parser_args, **parser_kwargs)
+        nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+        return nodes
+
+    except Exception as e1:
+        try:
+            parser_obj = parser.from_defaults(*parser_args, **parser_kwargs)
+            nodes = parser_obj.get_nodes_from_documents(documents, **parsing_kwargs)
+            return nodes
+        except Exception as e2:
+            raise ValueError(f'Failed to parse. Error: {e1}, {e2}')
+
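A hedged sketch of the read/parse/convert pipeline this file adds (assumes the pre-0.10 `llama_index` package layout that the diff imports from; the directory path is hypothetical):

    from lionagi.bridge.llama_index import (
        llama_index_reader, llama_index_node_parser, from_llama_index
    )

    # Read: 'SimpleDirectoryReader' is special-cased; any other string goes
    # through download_loader(reader). reader_args/kwargs construct the
    # reader; load_data_args/kwargs are forwarded to load_data().
    docs = llama_index_reader('SimpleDirectoryReader',
                              reader_args=['./data'])  # hypothetical path

    # Parse: the name is resolved against node_parser first, then
    # text_splitter; plain construction is tried before the from_defaults()
    # fallback shown in llama_index_node_parser.
    nodes = llama_index_node_parser(docs, 'SimpleNodeParser')

    # Convert each node into a lionagi DataNode via to_dict()/from_dict().
    data_nodes = [from_llama_index(n) for n in nodes]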
lionagi/configs/__init__.py ADDED
@@ -0,0 +1,7 @@
+from .oai_configs import oai_schema
+from .openrouter_config import openrouter_schema
+
+__all__ = [
+    "oai_schema",
+    "openrouter_schema"
+]
lionagi/configs/oai_configs.py ADDED
@@ -0,0 +1,49 @@
+oai_chat_llmconfig = {
+    "model": "gpt-4-1106-preview",
+    "frequency_penalty": 0,
+    "max_tokens": None,
+    "n": 1,
+    "presence_penalty": 0,
+    "response_format": {"type": "text"},
+    "seed": None,
+    "stop": None,
+    "stream": False,
+    "temperature": 0.7,
+    "top_p": 1,
+    "tools": None,
+    "tool_choice": "none",
+    "user": None
+    }
+
+oai_chat_schema = {
+    "required" : ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+    "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+    "input": "messages",
+    "config": oai_chat_llmconfig
+    }
+
+oai_finetune_llmconfig = {
+    "model": "gpt-3.5-turbo",
+    "hyperparameters": {
+        "batch_size": "auto",
+        "learning_rate_multiplier": "auto",
+        "n_epochs": "auto"
+        },
+    "suffix": None,
+    "training_file": None,
+    }
+
+oai_finetune_schema = {
+    "required" : ["model", "training_file"],
+    "optional": ["hyperparameters", "suffix", "validate_file"],
+    "input": ["training_file"],
+    "config": oai_finetune_llmconfig
+    }
+
+
+oai_schema = {
+
+    "chat": oai_chat_schema,
+    "finetune": oai_finetune_schema
+
+    }
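The required/optional split in oai_chat_schema hints at how a request payload could be assembled; a speculative sketch (the consuming service code is not shown in this hunk):

    from lionagi.configs import oai_schema

    chat = oai_schema["chat"]
    config = {**chat["config"], "temperature": 0.2}  # override a default

    # Put messages under the declared "input" key, always include required
    # keys, and include optional keys only when they are actually set.
    payload = {chat["input"]: [{"role": "user", "content": "hi"}]}
    payload.update({k: config[k] for k in chat["required"]})
    payload.update({k: config[k] for k in chat["optional"]
                    if config.get(k) is not None})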
lionagi/configs/openrouter_config.py ADDED
@@ -0,0 +1,49 @@
+openrouter_chat_llmconfig = {
+    "model": "gpt-4-1106-preview",
+    "frequency_penalty": 0,
+    "max_tokens": None,
+    "n": 1,
+    "presence_penalty": 0,
+    "response_format": {"type": "text"},
+    "seed": None,
+    "stop": None,
+    "stream": False,
+    "temperature": 0.7,
+    "top_p": 1,
+    "tools": None,
+    "tool_choice": "none",
+    "user": None
+    }
+
+openrouter_chat_schema = {
+    "required" : ["model", "frequency_penalty", "n", "presence_penalty", "response_format", "temperature", "top_p"],
+    "optional": ["seed", "stop", "stream", "tools", "tool_choice", "user", "max_tokens"],
+    "input": "messages",
+    "config": openrouter_chat_llmconfig
+    }
+
+openrouter_finetune_llmconfig = {
+    "model": "gpt-3.5-turbo",
+    "hyperparameters": {
+        "batch_size": "auto",
+        "learning_rate_multiplier": "auto",
+        "n_epochs": "auto"
+        },
+    "suffix": None,
+    "training_file": None,
+    }
+
+openrouter_finetune_schema = {
+    "required" : ["model", "training_file"],
+    "optional": ["hyperparameters", "suffix", "validate_file"],
+    "input": ["training_file"],
+    "config": openrouter_finetune_llmconfig
+    }
+
+
+openrouter_schema = {
+
+    "chat": openrouter_chat_schema,
+    "finetune": openrouter_finetune_schema
+
+    }
lionagi/core/__init__.py CHANGED
@@ -1,9 +1,15 @@
-from .messages import Message
+# from .messages import Response, Instruction, System
 from .conversations import Conversation
 from .sessions import Session
+
+# from .instruction_sets import InstructionSet
 # from .flows.flow import Flow
 
 
 __all__ = [
-    "Session", "Message", "Conversation"
+    # "Response",
+    # "Instruction",
+    # "System",
+    "Conversation",
+    "Session", #"Flow", "InstructionSet"
 ]
lionagi/core/instruction_sets.py CHANGED
@@ -1,3 +1 @@
-# TODO
-
-# a structured set of instruction nodes
+# dynamically structured preconfigured instructions
lionagi/core/messages.py CHANGED
@@ -1,6 +1,6 @@
 from datetime import datetime
 import json
-from ..utils.sys_utils import create_id, l_call
+from ..utils import create_id, lcall
 from ..schema import DataLogger
 
 
@@ -61,7 +61,7 @@ class Message:
 
             name (str): The name associated with the message. Default is None.
         """
-        if sum(l_call([system, instruction, response], bool)) > 1:
+        if sum(lcall([system, instruction, response], bool)) > 1:
             raise ValueError("Error: Message cannot have more than one role.")
 
         else:
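For reference, the renamed `lcall` applies `bool` across the three candidate roles, so the sum counts how many were supplied; a toy equivalent of the check, assuming `lcall(input, func)` maps `func` over a list:

    system, instruction, response = None, "summarize the report", None

    # Equivalent of lcall([system, instruction, response], bool):
    flags = [bool(x) for x in (system, instruction, response)]
    assert sum(flags) <= 1, "Message cannot have more than one role."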