lionagi 0.0.316__py3-none-any.whl → 0.1.1__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- lionagi/core/__init__.py +19 -8
- lionagi/core/agent/__init__.py +0 -3
- lionagi/core/agent/base_agent.py +25 -30
- lionagi/core/branch/__init__.py +0 -4
- lionagi/core/branch/{base_branch.py → base.py} +12 -13
- lionagi/core/branch/branch.py +22 -19
- lionagi/core/branch/executable_branch.py +0 -347
- lionagi/core/branch/{branch_flow_mixin.py → flow_mixin.py} +5 -5
- lionagi/core/direct/__init__.py +10 -1
- lionagi/core/direct/cot.py +61 -26
- lionagi/core/direct/plan.py +10 -8
- lionagi/core/direct/predict.py +5 -5
- lionagi/core/direct/react.py +8 -8
- lionagi/core/direct/score.py +4 -4
- lionagi/core/direct/select.py +4 -4
- lionagi/core/direct/utils.py +7 -4
- lionagi/core/direct/vote.py +2 -2
- lionagi/core/execute/base_executor.py +47 -0
- lionagi/core/execute/branch_executor.py +296 -0
- lionagi/core/execute/instruction_map_executor.py +179 -0
- lionagi/core/execute/neo4j_executor.py +381 -0
- lionagi/core/execute/structure_executor.py +314 -0
- lionagi/core/flow/monoflow/ReAct.py +20 -20
- lionagi/core/flow/monoflow/chat.py +6 -6
- lionagi/core/flow/monoflow/chat_mixin.py +23 -33
- lionagi/core/flow/monoflow/followup.py +14 -15
- lionagi/core/flow/polyflow/chat.py +15 -12
- lionagi/core/{prompt/action_template.py → form/action_form.py} +2 -2
- lionagi/core/{prompt → form}/field_validator.py +40 -31
- lionagi/core/form/form.py +302 -0
- lionagi/core/form/mixin.py +214 -0
- lionagi/core/{prompt/scored_template.py → form/scored_form.py} +2 -2
- lionagi/core/generic/__init__.py +37 -0
- lionagi/core/generic/action.py +26 -0
- lionagi/core/generic/component.py +455 -0
- lionagi/core/generic/condition.py +44 -0
- lionagi/core/generic/data_logger.py +305 -0
- lionagi/core/generic/edge.py +162 -0
- lionagi/core/generic/mail.py +90 -0
- lionagi/core/generic/mailbox.py +36 -0
- lionagi/core/generic/node.py +285 -0
- lionagi/core/generic/relation.py +70 -0
- lionagi/core/generic/signal.py +22 -0
- lionagi/core/generic/structure.py +362 -0
- lionagi/core/generic/transfer.py +20 -0
- lionagi/core/generic/work.py +40 -0
- lionagi/core/graph/graph.py +126 -0
- lionagi/core/graph/tree.py +190 -0
- lionagi/core/mail/__init__.py +0 -8
- lionagi/core/mail/mail_manager.py +15 -12
- lionagi/core/mail/schema.py +9 -2
- lionagi/core/messages/__init__.py +0 -3
- lionagi/core/messages/schema.py +17 -225
- lionagi/core/session/__init__.py +0 -3
- lionagi/core/session/session.py +24 -22
- lionagi/core/tool/__init__.py +3 -1
- lionagi/core/tool/tool.py +28 -0
- lionagi/core/tool/tool_manager.py +75 -75
- lionagi/experimental/directive/evaluator/__init__.py +0 -0
- lionagi/experimental/directive/evaluator/ast_evaluator.py +115 -0
- lionagi/experimental/directive/evaluator/base_evaluator.py +202 -0
- lionagi/experimental/directive/evaluator/sandbox_.py +14 -0
- lionagi/experimental/directive/evaluator/script_engine.py +83 -0
- lionagi/experimental/directive/parser/__init__.py +0 -0
- lionagi/experimental/directive/parser/base_parser.py +215 -0
- lionagi/experimental/directive/schema.py +36 -0
- lionagi/experimental/directive/template_/__init__.py +0 -0
- lionagi/experimental/directive/template_/base_template.py +63 -0
- lionagi/experimental/tool/__init__.py +0 -0
- lionagi/experimental/tool/function_calling.py +43 -0
- lionagi/experimental/tool/manual.py +66 -0
- lionagi/experimental/tool/schema.py +59 -0
- lionagi/experimental/tool/tool_manager.py +138 -0
- lionagi/experimental/tool/util.py +16 -0
- lionagi/experimental/work/__init__.py +0 -0
- lionagi/experimental/work/_logger.py +25 -0
- lionagi/experimental/work/exchange.py +0 -0
- lionagi/experimental/work/schema.py +30 -0
- lionagi/experimental/work/tests.py +72 -0
- lionagi/experimental/work/util.py +0 -0
- lionagi/experimental/work/work_function.py +89 -0
- lionagi/experimental/work/worker.py +12 -0
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
- lionagi/integrations/bridge/llamaindex_/get_index.py +294 -0
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +36 -0
- lionagi/integrations/chunker/chunk.py +7 -7
- lionagi/integrations/config/oai_configs.py +5 -5
- lionagi/integrations/config/ollama_configs.py +1 -1
- lionagi/integrations/config/openrouter_configs.py +1 -1
- lionagi/integrations/loader/load.py +6 -6
- lionagi/integrations/loader/load_util.py +8 -8
- lionagi/integrations/storage/__init__.py +3 -0
- lionagi/integrations/storage/neo4j.py +673 -0
- lionagi/integrations/storage/storage_util.py +289 -0
- lionagi/integrations/storage/to_csv.py +63 -0
- lionagi/integrations/storage/to_excel.py +67 -0
- lionagi/libs/ln_api.py +3 -3
- lionagi/libs/ln_knowledge_graph.py +405 -0
- lionagi/libs/ln_parse.py +43 -6
- lionagi/libs/ln_queue.py +101 -0
- lionagi/libs/ln_tokenizer.py +57 -0
- lionagi/libs/ln_validate.py +288 -0
- lionagi/libs/sys_util.py +29 -7
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +20 -0
- lionagi/lions/coder/base_prompts.py +22 -0
- lionagi/lions/coder/coder.py +121 -0
- lionagi/lions/coder/util.py +91 -0
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +191 -0
- lionagi/lions/researcher/data_source/google_.py +199 -0
- lionagi/lions/researcher/data_source/wiki_.py +96 -0
- lionagi/lions/researcher/data_source/yfinance_.py +21 -0
- lionagi/tests/integrations/__init__.py +0 -0
- lionagi/tests/libs/__init__.py +0 -0
- lionagi/tests/libs/test_async.py +0 -0
- lionagi/tests/libs/test_field_validators.py +353 -0
- lionagi/tests/libs/test_queue.py +67 -0
- lionagi/tests/test_core/test_base_branch.py +0 -1
- lionagi/tests/test_core/test_branch.py +2 -0
- lionagi/tests/test_core/test_session_base_util.py +1 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.316.dist-info → lionagi-0.1.1.dist-info}/METADATA +1 -1
- lionagi-0.1.1.dist-info/RECORD +190 -0
- lionagi/core/prompt/prompt_template.py +0 -312
- lionagi/core/schema/__init__.py +0 -22
- lionagi/core/schema/action_node.py +0 -29
- lionagi/core/schema/base_mixin.py +0 -296
- lionagi/core/schema/base_node.py +0 -199
- lionagi/core/schema/condition.py +0 -24
- lionagi/core/schema/data_logger.py +0 -354
- lionagi/core/schema/data_node.py +0 -93
- lionagi/core/schema/prompt_template.py +0 -67
- lionagi/core/schema/structure.py +0 -912
- lionagi/core/tool/manual.py +0 -1
- lionagi-0.0.316.dist-info/RECORD +0 -121
- /lionagi/core/{branch/base → execute}/__init__.py +0 -0
- /lionagi/core/flow/{base/baseflow.py → baseflow.py} +0 -0
- /lionagi/core/flow/{base/__init__.py → mono_chat_mixin.py} +0 -0
- /lionagi/core/{prompt → form}/__init__.py +0 -0
- /lionagi/{tests/test_integrations → core/graph}/__init__.py +0 -0
- /lionagi/{tests/test_libs → experimental}/__init__.py +0 -0
- /lionagi/{tests/test_libs/test_async.py → experimental/directive/__init__.py} +0 -0
- /lionagi/tests/{test_libs → libs}/test_api.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_convert.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_func_call.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_nested.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_parse.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_sys_util.py +0 -0
- {lionagi-0.0.316.dist-info → lionagi-0.1.1.dist-info}/LICENSE +0 -0
- {lionagi-0.0.316.dist-info → lionagi-0.1.1.dist-info}/WHEEL +0 -0
- {lionagi-0.0.316.dist-info → lionagi-0.1.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,294 @@
|
|
1
|
+
# TODO: Refactor this code to use the new llama_index API
|
2
|
+
|
3
|
+
# class BaseIndex:
|
4
|
+
|
5
|
+
# @staticmethod
|
6
|
+
# def _get_index(
|
7
|
+
# input_=None,
|
8
|
+
# # default to OpenAI
|
9
|
+
# llm=None,
|
10
|
+
# llm_provider=None,
|
11
|
+
# llm_kwargs={},
|
12
|
+
# service_context=None,
|
13
|
+
# service_context_kwargs={},
|
14
|
+
# index_type=None,
|
15
|
+
# index_kwargs={},
|
16
|
+
# rerank_=False,
|
17
|
+
# reranker_type=None,
|
18
|
+
# reranker=None,
|
19
|
+
# rerank_kwargs={},
|
20
|
+
# get_engine=False,
|
21
|
+
# engine_kwargs={},
|
22
|
+
# from_storage=False,
|
23
|
+
# storage_context=None,
|
24
|
+
# strorage_context_kwargs={},
|
25
|
+
# index_id=None,
|
26
|
+
# load_index_from_storage_kwargs={},
|
27
|
+
# ):
|
28
|
+
# """
|
29
|
+
# Creates and returns an index or query engine based on the provided parameters.
|
30
|
+
|
31
|
+
# Args:
|
32
|
+
# chunks: The input data to be indexed or queried.
|
33
|
+
# llm: An instance of a language model for indexing or querying.
|
34
|
+
# llm_provider: A function to provide an instance of a language model.
|
35
|
+
# llm_kwargs: Keyword arguments for configuring the language model.
|
36
|
+
# service_context: An instance of a service context.
|
37
|
+
# service_context_kwargs: Keyword arguments for configuring the service context.
|
38
|
+
# index_type: The type of index to create.
|
39
|
+
# index_kwargs: Keyword arguments for configuring the index.
|
40
|
+
# rerank_: Boolean flag indicating whether reranking should be applied.
|
41
|
+
# reranker_type: The type of reranker to use.
|
42
|
+
# reranker: An instance of a reranker.
|
43
|
+
# rerank_kwargs: Keyword arguments for configuring the reranker.
|
44
|
+
# get_engine: Boolean flag indicating whether to return a query engine.
|
45
|
+
# engine_kwargs: Keyword arguments for configuring the query engine.
|
46
|
+
|
47
|
+
# Returns:
|
48
|
+
# Index or Query Engine: Depending on the 'get_engine' flag, returns an index or query engine.
|
49
|
+
|
50
|
+
# Raises:
|
51
|
+
# Various exceptions if there are errors in creating the index or query engine.
|
52
|
+
# """
|
53
|
+
|
54
|
+
# if from_storage:
|
55
|
+
# from llama_index import StorageContext, load_index_from_storage
|
56
|
+
|
57
|
+
# storage_context = StorageContext.from_defaults(**strorage_context_kwargs)
|
58
|
+
|
59
|
+
# if index_id:
|
60
|
+
# index = load_index_from_storage(
|
61
|
+
# storage_context=storage_context,
|
62
|
+
# index_id=index_id,
|
63
|
+
# **load_index_from_storage_kwargs,
|
64
|
+
# )
|
65
|
+
# else:
|
66
|
+
# raise ValueError("Index ID is required for loading from storage.")
|
67
|
+
|
68
|
+
# if rerank_:
|
69
|
+
# if not reranker:
|
70
|
+
# if not reranker_type:
|
71
|
+
# from llama_index.postprocessor import LLMRerank
|
72
|
+
|
73
|
+
# reranker_type = LLMRerank
|
74
|
+
# reranker = reranker_type(
|
75
|
+
# service_context=service_context, **rerank_kwargs
|
76
|
+
# )
|
77
|
+
# engine_kwargs.update({"node_postprocessors": [reranker]})
|
78
|
+
|
79
|
+
# if get_engine:
|
80
|
+
# return (index, index.as_query_engine(**engine_kwargs))
|
81
|
+
# return index
|
82
|
+
|
83
|
+
# if not llm:
|
84
|
+
# if llm_provider:
|
85
|
+
# llm = llm_provider(**llm_kwargs)
|
86
|
+
# else:
|
87
|
+
# from llama_index.llms import OpenAI
|
88
|
+
|
89
|
+
# llm = OpenAI(**llm_kwargs)
|
90
|
+
|
91
|
+
# if not service_context:
|
92
|
+
# from llama_index import ServiceContext
|
93
|
+
|
94
|
+
# service_context = ServiceContext.from_defaults(
|
95
|
+
# llm=llm, **service_context_kwargs
|
96
|
+
# )
|
97
|
+
|
98
|
+
# if not index_type:
|
99
|
+
# from llama_index import VectorStoreIndex
|
100
|
+
|
101
|
+
# index_type = VectorStoreIndex
|
102
|
+
|
103
|
+
# index = index_type(input_, service_context=service_context, **index_kwargs)
|
104
|
+
|
105
|
+
# if index_id:
|
106
|
+
# index.index_id = index_id
|
107
|
+
|
108
|
+
# if rerank_:
|
109
|
+
# if not reranker:
|
110
|
+
# if not reranker_type:
|
111
|
+
# from llama_index.postprocessor import LLMRerank
|
112
|
+
|
113
|
+
# reranker_type = LLMRerank
|
114
|
+
# reranker = reranker_type(
|
115
|
+
# service_context=service_context, **rerank_kwargs
|
116
|
+
# )
|
117
|
+
# engine_kwargs.update({"node_postprocessors": [reranker]})
|
118
|
+
|
119
|
+
# if get_engine:
|
120
|
+
# return (index, index.as_query_engine(**engine_kwargs))
|
121
|
+
# return index
|
122
|
+
|
123
|
+
|
124
|
+
# class LlamaIndex:
|
125
|
+
|
126
|
+
# @staticmethod
|
127
|
+
# def kg_index(
|
128
|
+
# input_=None,
|
129
|
+
# # default to OpenAI
|
130
|
+
# llm=None,
|
131
|
+
# llm_provider=None,
|
132
|
+
# llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
|
133
|
+
# service_context=None,
|
134
|
+
# service_context_kwargs={},
|
135
|
+
# index_kwargs={"include_embeddings": True},
|
136
|
+
# rerank_=False,
|
137
|
+
# reranker_type=None,
|
138
|
+
# reranker=None,
|
139
|
+
# rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
|
140
|
+
# get_engine=False,
|
141
|
+
# engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
|
142
|
+
# kg_triplet_extract_fn=None,
|
143
|
+
# from_storage=False,
|
144
|
+
# storage_context=None,
|
145
|
+
# strorage_context_kwargs={},
|
146
|
+
# index_id=None,
|
147
|
+
# load_index_from_storage_kwargs={},
|
148
|
+
# ):
|
149
|
+
# """
|
150
|
+
# Creates and returns a KnowledgeGraphIndex based on the provided parameters.
|
151
|
+
|
152
|
+
# Args:
|
153
|
+
# chunks: The input data to be indexed.
|
154
|
+
# llm: An instance of a language model for indexing.
|
155
|
+
# llm_provider: A function to provide an instance of a language model.
|
156
|
+
# llm_kwargs: Keyword arguments for configuring the language model.
|
157
|
+
# service_context: An instance of a service context.
|
158
|
+
# service_context_kwargs: Keyword arguments for configuring the service context.
|
159
|
+
# index_kwargs: Keyword arguments for configuring the index.
|
160
|
+
# rerank_: Boolean flag indicating whether reranking should be applied.
|
161
|
+
# reranker_type: The type of reranker to use.
|
162
|
+
# reranker: An instance of a reranker.
|
163
|
+
# rerank_kwargs: Keyword arguments for configuring the reranker.
|
164
|
+
# get_engine: Boolean flag indicating whether to return a query engine.
|
165
|
+
# engine_kwargs: Keyword arguments for configuring the query engine.
|
166
|
+
# kg_triplet_extract_fn: Optional function for extracting KG triplets.
|
167
|
+
|
168
|
+
# Returns:
|
169
|
+
# KnowledgeGraphIndex or Query Engine: Depending on the 'get_engine' flag,
|
170
|
+
# returns a KnowledgeGraphIndex or query engine.
|
171
|
+
|
172
|
+
# Raises:
|
173
|
+
# Various exceptions if there are errors in creating the index or query engine.
|
174
|
+
# """
|
175
|
+
# from llama_index import KnowledgeGraphIndex
|
176
|
+
|
177
|
+
# index_type_ = ""
|
178
|
+
# if not from_storage:
|
179
|
+
# from llama_index.graph_stores import SimpleGraphStore
|
180
|
+
# from llama_index.storage.storage_context import StorageContext
|
181
|
+
|
182
|
+
# graph_store = SimpleGraphStore()
|
183
|
+
# if storage_context is None:
|
184
|
+
# storage_context = StorageContext.from_defaults(
|
185
|
+
# graph_store=graph_store, **strorage_context_kwargs
|
186
|
+
# )
|
187
|
+
# index_kwargs.update({"storage_context": storage_context})
|
188
|
+
# index_type_ = KnowledgeGraphIndex.from_documents
|
189
|
+
|
190
|
+
# elif from_storage:
|
191
|
+
# index_type_ = KnowledgeGraphIndex
|
192
|
+
|
193
|
+
# if kg_triplet_extract_fn:
|
194
|
+
# index_kwargs.update({"kg_triplet_extract_fn": kg_triplet_extract_fn})
|
195
|
+
|
196
|
+
# if storage_context is None:
|
197
|
+
# from llama_index.graph_stores import SimpleGraphStore
|
198
|
+
# from llama_index.storage.storage_context import StorageContext
|
199
|
+
|
200
|
+
# storage_context = StorageContext.from_defaults(
|
201
|
+
# graph_store=SimpleGraphStore(), **strorage_context_kwargs
|
202
|
+
# )
|
203
|
+
|
204
|
+
# return BaseIndex._get_index(
|
205
|
+
# input_=input_,
|
206
|
+
# llm=llm,
|
207
|
+
# llm_provider=llm_provider,
|
208
|
+
# llm_kwargs=llm_kwargs,
|
209
|
+
# service_context=service_context,
|
210
|
+
# service_context_kwargs=service_context_kwargs,
|
211
|
+
# index_type=index_type_,
|
212
|
+
# index_kwargs=index_kwargs,
|
213
|
+
# rerank_=rerank_,
|
214
|
+
# reranker_type=reranker_type,
|
215
|
+
# reranker=reranker,
|
216
|
+
# rerank_kwargs=rerank_kwargs,
|
217
|
+
# get_engine=get_engine,
|
218
|
+
# engine_kwargs=engine_kwargs,
|
219
|
+
# from_storage=from_storage,
|
220
|
+
# storage_context=storage_context,
|
221
|
+
# strorage_context_kwargs=strorage_context_kwargs,
|
222
|
+
# index_id=index_id,
|
223
|
+
# load_index_from_storage_kwargs=load_index_from_storage_kwargs,
|
224
|
+
# )
|
225
|
+
|
226
|
+
# @staticmethod
|
227
|
+
# def vector_index(
|
228
|
+
# input_=None,
|
229
|
+
# # default to OpenAI
|
230
|
+
# llm=None,
|
231
|
+
# llm_provider=None,
|
232
|
+
# llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
|
233
|
+
# service_context=None,
|
234
|
+
# service_context_kwargs={},
|
235
|
+
# index_kwargs={"include_embeddings": True},
|
236
|
+
# # default to LLMRerank
|
237
|
+
# rerank_=False,
|
238
|
+
# reranker_type=None,
|
239
|
+
# reranker=None,
|
240
|
+
# rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
|
241
|
+
# get_engine=False,
|
242
|
+
# engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
|
243
|
+
# from_storage=False,
|
244
|
+
# storage_context=None,
|
245
|
+
# strorage_context_kwargs={},
|
246
|
+
# index_id=None,
|
247
|
+
# load_index_from_storage_kwargs={},
|
248
|
+
# ):
|
249
|
+
# """
|
250
|
+
# Creates and returns a vector index or query engine based on the provided parameters.
|
251
|
+
|
252
|
+
# Args:
|
253
|
+
# chunks: The input data to be indexed or queried.
|
254
|
+
# llm: An instance of a language model for indexing or querying.
|
255
|
+
# llm_provider: A function to provide an instance of a language model.
|
256
|
+
# llm_kwargs: Keyword arguments for configuring the language model.
|
257
|
+
# service_context: An instance of a service context.
|
258
|
+
# service_context_kwargs: Keyword arguments for configuring the service context.
|
259
|
+
# index_kwargs: Keyword arguments for configuring the index.
|
260
|
+
# rerank_: Boolean flag indicating whether reranking should be applied.
|
261
|
+
# reranker_type: The type of reranker to use.
|
262
|
+
# reranker: An instance of a reranker.
|
263
|
+
# rerank_kwargs: Keyword arguments for configuring the reranker.
|
264
|
+
# get_engine: Boolean flag indicating whether to return a query engine.
|
265
|
+
# engine_kwargs: Keyword arguments for configuring the query engine.
|
266
|
+
|
267
|
+
# Returns:
|
268
|
+
# Vector Index or Query Engine: Depending on the 'get_engine' flag,
|
269
|
+
# returns a vector index or query engine.
|
270
|
+
|
271
|
+
# Raises:
|
272
|
+
# Various exceptions if there are errors in creating the index or query engine.
|
273
|
+
# """
|
274
|
+
|
275
|
+
# return BaseIndex._get_index(
|
276
|
+
# input_=input_,
|
277
|
+
# llm=llm,
|
278
|
+
# llm_provider=llm_provider,
|
279
|
+
# llm_kwargs=llm_kwargs,
|
280
|
+
# service_context=service_context,
|
281
|
+
# service_context_kwargs=service_context_kwargs,
|
282
|
+
# index_kwargs=index_kwargs,
|
283
|
+
# rerank_=rerank_,
|
284
|
+
# reranker_type=reranker_type,
|
285
|
+
# reranker=reranker,
|
286
|
+
# rerank_kwargs=rerank_kwargs,
|
287
|
+
# get_engine=get_engine,
|
288
|
+
# engine_kwargs=engine_kwargs,
|
289
|
+
# from_storage=from_storage,
|
290
|
+
# storage_context=storage_context,
|
291
|
+
# strorage_context_kwargs=strorage_context_kwargs,
|
292
|
+
# index_id=index_id,
|
293
|
+
# load_index_from_storage_kwargs=load_index_from_storage_kwargs,
|
294
|
+
# )
|
@@ -0,0 +1,227 @@
|
|
1
|
+
class LlamaPack:
    """Static convenience wrappers around LlamaIndex "llama packs".

    Each ``*_pack`` method downloads a named pack into a local directory and
    instantiates it with the given positional ``args`` and keyword ``kwargs``.
    """

    @staticmethod
    def download(pack_name, pack_path):
        """Download the pack ``pack_name`` into ``pack_path``.

        Returns:
            The pack class produced by ``download_llama_pack``.

        Raises:
            ImportError: if llama_index is not installed or the download fails.
        """
        try:
            from llama_index.llama_pack import download_llama_pack

            return download_llama_pack(pack_name, pack_path)
        except Exception as e:
            # Surface any failure (missing dependency, network error) uniformly,
            # chaining the original cause for debuggability.
            raise ImportError(f"Error in downloading llama pack: {e}") from e

    @staticmethod
    def build(pack_name, pack_path, args=None, **kwargs):
        """Download ``pack_name`` and instantiate it with ``args`` / ``kwargs``."""
        # `args` defaults to None instead of a mutable [] shared across calls.
        args = args if args is not None else []
        pack = LlamaPack.download(pack_name, pack_path)
        return pack(*args, **kwargs)

    @staticmethod
    def stock_market_pack(pack_path="./stock_market_data_pack", args=None, **kwargs):
        return LlamaPack.build("StockMarketDataQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def embedded_table_pack(
        pack_path="./embedded_tables_unstructured_pack", args=None, **kwargs
    ):
        return LlamaPack.build("RecursiveRetrieverSmallToBigPack", pack_path, args, **kwargs)

    @staticmethod
    def rag_evaluator_pack(pack_path="./rag_evaluator_pack", args=None, **kwargs):
        return LlamaPack.build("RagEvaluatorPack", pack_path, args, **kwargs)

    @staticmethod
    def ollma_pack(pack_path="./ollama_pack", args=None, **kwargs):
        # NOTE: method name keeps the historical typo ("ollma") so existing
        # callers keep working.
        return LlamaPack.build("OllamaQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def llm_compiler_agent_pack(
        pack_path="./llm_compiler_agent_pack", args=None, **kwargs
    ):
        return LlamaPack.build("LLMCompilerAgentPack", pack_path, args, **kwargs)

    @staticmethod
    def resume_screener_pack(pack_path="./resume_screener_pack", args=None, **kwargs):
        return LlamaPack.build("ResumeScreenerPack", pack_path, args, **kwargs)

    @staticmethod
    def ragatouille_retriever_pack(pack_path="./ragatouille_pack", args=None, **kwargs):
        return LlamaPack.build("RAGatouilleRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def chain_of_table_pack(pack_path="./chain_of_table_pack", args=None, **kwargs):
        return LlamaPack.build("ChainOfTablePack", pack_path, args, **kwargs)

    @staticmethod
    def hybrid_fusion_retriever_pack(
        pack_path="./hybrid_fusion_pack", args=None, **kwargs
    ):
        return LlamaPack.build("HybridFusionRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def neo4j_query_engine_pack(pack_path="./neo4j_pack", args=None, **kwargs):
        return LlamaPack.build("Neo4jQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def llava_completion_pack(pack_path="./llava_pack", args=None, **kwargs):
        return LlamaPack.build("LlavaCompletionPack", pack_path, args, **kwargs)

    @staticmethod
    def sentence_window_retriever_pack(
        pack_path="./sentence_window_retriever_pack", args=None, **kwargs
    ):
        return LlamaPack.build("SentenceWindowRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def dense_x_retrieval_pack(pack_path="./dense_pack", args=None, **kwargs):
        return LlamaPack.build("DenseXRetrievalPack", pack_path, args, **kwargs)

    @staticmethod
    def zephyr_query_engine_pack(pack_path="./zephyr_pack", args=None, **kwargs):
        return LlamaPack.build("ZephyrQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def query_rewriting_retriever_pack(
        pack_path="./query_rewriting_pack", args=None, **kwargs
    ):
        return LlamaPack.build("QueryRewritingRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def fuzzy_citation_engine_pack(
        pack_path="./fuzzy_citation_pack", args=None, **kwargs
    ):
        return LlamaPack.build("FuzzyCitationEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def multidoc_auto_retriever_pack(
        pack_path="./multidoc_autoretrieval_pack", args=None, **kwargs
    ):
        return LlamaPack.build("MultiDocAutoRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def auto_merging_retriever_pack(
        pack_path="./auto_merging_retriever_pack", args=None, **kwargs
    ):
        return LlamaPack.build("AutoMergingRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def voyage_query_engine_pack(pack_path="./voyage_pack", args=None, **kwargs):
        return LlamaPack.build("VoyageQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def mix_self_consistency_pack(
        pack_path="./mix_self_consistency_pack", args=None, **kwargs
    ):
        return LlamaPack.build("MixSelfConsistencyPack", pack_path, args, **kwargs)

    @staticmethod
    def rag_fusion_pipeline_pack(
        pack_path="./rag_fusion_pipeline_pack", args=None, **kwargs
    ):
        return LlamaPack.build("RAGFusionPipelinePack", pack_path, args, **kwargs)

    @staticmethod
    def multi_document_agents_pack(
        pack_path="./multi_doc_agents_pack", args=None, **kwargs
    ):
        return LlamaPack.build("MultiDocumentAgentsPack", pack_path, args, **kwargs)

    @staticmethod
    def llama_guard_moderator_pack(pack_path="./llamaguard_pack", args=None, **kwargs):
        return LlamaPack.build("LlamaGuardModeratorPack", pack_path, args, **kwargs)

    @staticmethod
    def evaluator_benchmarker_pack(
        pack_path="./eval_benchmark_pack", args=None, **kwargs
    ):
        return LlamaPack.build("EvaluatorBenchmarkerPack", pack_path, args, **kwargs)

    @staticmethod
    def amazon_product_extraction_pack(
        pack_path="./amazon_product_extraction_pack", args=None, **kwargs
    ):
        return LlamaPack.build("AmazonProductExtractionPack", pack_path, args, **kwargs)

    @staticmethod
    def llama_dataset_metadata_pack(
        pack_path="./llama_dataset_metadata_pack", args=None, **kwargs
    ):
        return LlamaPack.build("LlamaDatasetMetadataPack", pack_path, args, **kwargs)

    @staticmethod
    def multi_tenancy_rag_pack(pack_path="./multitenancy_rag_pack", args=None, **kwargs):
        return LlamaPack.build("MultiTenancyRAGPack", pack_path, args, **kwargs)

    @staticmethod
    def gmail_openai_agent_pack(pack_path="./gmail_pack", args=None, **kwargs):
        return LlamaPack.build("GmailOpenAIAgentPack", pack_path, args, **kwargs)

    @staticmethod
    def snowflake_query_engine_pack(pack_path="./snowflake_pack", args=None, **kwargs):
        return LlamaPack.build("SnowflakeQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def agent_search_retriever_pack(pack_path="./agent_search_pack", args=None, **kwargs):
        return LlamaPack.build("AgentSearchRetrieverPack", pack_path, args, **kwargs)

    @staticmethod
    def vectara_rag_pack(pack_path="./vectara_rag_pack", args=None, **kwargs):
        return LlamaPack.build("VectaraRagPack", pack_path, args, **kwargs)

    @staticmethod
    def chroma_autoretrieval_pack(pack_path="./chroma_pack", args=None, **kwargs):
        return LlamaPack.build("ChromaAutoretrievalPack", pack_path, args, **kwargs)

    @staticmethod
    def arize_phoenix_query_engine_pack(pack_path="./arize_pack", args=None, **kwargs):
        return LlamaPack.build("ArizePhoenixQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def redis_ingestion_pipeline_pack(
        pack_path="./redis_ingestion_pack", args=None, **kwargs
    ):
        return LlamaPack.build("RedisIngestionPipelinePack", pack_path, args, **kwargs)

    @staticmethod
    def nebula_graph_query_engine_pack(
        pack_path="./nebulagraph_pack", args=None, **kwargs
    ):
        return LlamaPack.build("NebulaGraphQueryEnginePack", pack_path, args, **kwargs)

    @staticmethod
    def weaviate_retry_engine_pack(pack_path="./weaviate_pack", args=None, **kwargs):
        return LlamaPack.build("WeaviateRetryEnginePack", pack_path, args, **kwargs)
|
File without changes
|
@@ -0,0 +1,36 @@
|
|
1
|
+
import subprocess
|
2
|
+
from lionagi.libs import SysUtil
|
3
|
+
|
4
|
+
|
5
|
+
def get_pytorch_install_command():
    """Return the pip command line for installing PyTorch on this machine.

    Apple-silicon hosts get the nightly CPU wheel from the PyTorch index;
    every other architecture gets the default CPU build.
    """
    # SysUtil.get_cpu_architecture() is a project helper; "apple_silicon" is
    # the only architecture that needs the special extra-index URL here.
    arch = SysUtil.get_cpu_architecture()
    if arch == "apple_silicon":
        return "pip install --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu"
    return "pip install torch torchvision torchaudio"
|
13
|
+
|
14
|
+
|
15
|
+
def install_pytorch():
    """Install PyTorch via pip for the current CPU architecture, printing the outcome."""
    cmd = get_pytorch_install_command()
    try:
        # shell=False: the command string is split into an argv list.
        subprocess.run(cmd.split(), check=True)
    except subprocess.CalledProcessError as err:
        print(f"Failed to install PyTorch: {err}")
    else:
        print("PyTorch installed successfully.")
|
22
|
+
|
23
|
+
|
24
|
+
def install_transformers():
    """Interactively install `transformers` (and PyTorch first, if missing).

    Prompts the user before each install; any answer other than an
    affirmative skips that step. Uses project helpers on SysUtil to check
    for and install packages.
    """
    if not SysUtil.is_package_installed("torch"):
        in_ = input(
            "PyTorch is required for transformers. Would you like to install it now? (y/n): "
        )
        # Accept "y"/"yes" case-insensitively; originally only a literal "y"
        # was accepted, so "Y" silently skipped the install.
        if in_.strip().lower() in ("y", "yes"):
            install_pytorch()
    if not SysUtil.is_package_installed("transformers"):
        in_ = input(
            "transformers is required. Would you like to install it now? (y/n): "
        )
        if in_.strip().lower() in ("y", "yes"):
            SysUtil.install_import(package_name="transformers", import_name="pipeline")
|
@@ -1,7 +1,7 @@
|
|
1
1
|
from typing import Union, Callable
|
2
2
|
|
3
3
|
from lionagi.libs import func_call
|
4
|
-
from lionagi.core.
|
4
|
+
from lionagi.core.generic import Node
|
5
5
|
from ..bridge.langchain_.langchain_bridge import LangchainBridge
|
6
6
|
from ..bridge.llamaindex_.llama_index_bridge import LlamaIndexBridge
|
7
7
|
|
@@ -12,7 +12,7 @@ from ..loader.load_util import ChunkerType, file_to_chunks, _datanode_parser
|
|
12
12
|
def datanodes_convert(documents, chunker_type):
|
13
13
|
|
14
14
|
for i in range(len(documents)):
|
15
|
-
if type(documents[i]) ==
|
15
|
+
if type(documents[i]) == Node:
|
16
16
|
if chunker_type == ChunkerType.LLAMAINDEX:
|
17
17
|
documents[i] = documents[i].to_llama_index()
|
18
18
|
elif chunker_type == ChunkerType.LANGCHAIN:
|
@@ -25,7 +25,7 @@ def text_chunker(documents, args, kwargs):
|
|
25
25
|
def chunk_node(node):
|
26
26
|
chunks = file_to_chunks(node.to_dict(), *args, **kwargs)
|
27
27
|
func_call.lcall(chunks, lambda chunk: chunk.pop("node_id"))
|
28
|
-
return [
|
28
|
+
return [Node.from_obj({**chunk}) for chunk in chunks]
|
29
29
|
|
30
30
|
return [chunk_node(doc) for doc in documents]
|
31
31
|
|
@@ -106,7 +106,7 @@ def _self_defined_chunker(
|
|
106
106
|
) from e
|
107
107
|
|
108
108
|
if isinstance(to_datanode, bool) and to_datanode is True:
|
109
|
-
raise ValueError("Please define a valid parser to
|
109
|
+
raise ValueError("Please define a valid parser to Node.")
|
110
110
|
elif isinstance(to_datanode, Callable):
|
111
111
|
nodes = _datanode_parser(nodes, to_datanode)
|
112
112
|
return nodes
|
@@ -127,7 +127,7 @@ def _llama_index_chunker(
|
|
127
127
|
)
|
128
128
|
|
129
129
|
if isinstance(to_datanode, bool) and to_datanode is True:
|
130
|
-
nodes = [
|
130
|
+
nodes = [Node.from_llama_index(i) for i in nodes]
|
131
131
|
elif isinstance(to_datanode, Callable):
|
132
132
|
nodes = _datanode_parser(nodes, to_datanode)
|
133
133
|
return nodes
|
@@ -148,9 +148,9 @@ def _langchain_chunker(
|
|
148
148
|
)
|
149
149
|
if isinstance(to_datanode, bool) and to_datanode is True:
|
150
150
|
if isinstance(documents, str):
|
151
|
-
nodes = [
|
151
|
+
nodes = [Node(content=i) for i in nodes]
|
152
152
|
else:
|
153
|
-
nodes = [
|
153
|
+
nodes = [Node.from_langchain(i) for i in nodes]
|
154
154
|
elif isinstance(to_datanode, Callable):
|
155
155
|
nodes = _datanode_parser(nodes, to_datanode)
|
156
156
|
return nodes
|
@@ -2,7 +2,7 @@
|
|
2
2
|
|
3
3
|
# ChatCompletion
|
4
4
|
oai_chat_llmconfig = {
|
5
|
-
"model": "gpt-4-turbo
|
5
|
+
"model": "gpt-4-turbo",
|
6
6
|
"frequency_penalty": 0,
|
7
7
|
"max_tokens": None,
|
8
8
|
"n": 1,
|
@@ -79,7 +79,7 @@ oai_audio_speech_schema = {
|
|
79
79
|
oai_audio_transcriptions_llmconfig = {
|
80
80
|
"model": "whisper-1",
|
81
81
|
"language": None,
|
82
|
-
"
|
82
|
+
"format_prompt": None,
|
83
83
|
"response_format": "json",
|
84
84
|
"temperature": 0,
|
85
85
|
}
|
@@ -88,7 +88,7 @@ oai_audio_transcriptions_schema = {
|
|
88
88
|
"optional": [
|
89
89
|
"response_format",
|
90
90
|
"language",
|
91
|
-
"
|
91
|
+
"format_prompt",
|
92
92
|
"response_format",
|
93
93
|
"temperature",
|
94
94
|
],
|
@@ -99,14 +99,14 @@ oai_audio_transcriptions_schema = {
|
|
99
99
|
# Audio ------------ translations
|
100
100
|
oai_audio_translations_llmconfig = {
|
101
101
|
"model": "whisper-1",
|
102
|
-
"
|
102
|
+
"format_prompt": None,
|
103
103
|
"response_format": "json",
|
104
104
|
"temperature": 0,
|
105
105
|
}
|
106
106
|
|
107
107
|
oai_audio_translations_schema = {
|
108
108
|
"required": ["model"],
|
109
|
-
"optional": ["response_format", "speed", "
|
109
|
+
"optional": ["response_format", "speed", "format_prompt", "temperature"],
|
110
110
|
"input_": "file",
|
111
111
|
"config": oai_audio_translations_llmconfig,
|
112
112
|
}
|
@@ -1 +1 @@
|
|
1
|
-
model = "
|
1
|
+
model = "llama3"
|