lionagi 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to their respective public registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registries.
- lionagi/core/agent/base_agent.py +2 -3
- lionagi/core/branch/base.py +1 -1
- lionagi/core/branch/branch.py +2 -1
- lionagi/core/branch/flow_mixin.py +1 -1
- lionagi/core/branch/util.py +1 -1
- lionagi/core/execute/base_executor.py +1 -4
- lionagi/core/execute/branch_executor.py +66 -3
- lionagi/core/execute/instruction_map_executor.py +48 -0
- lionagi/core/execute/neo4j_executor.py +381 -0
- lionagi/core/execute/structure_executor.py +99 -3
- lionagi/core/flow/monoflow/ReAct.py +18 -18
- lionagi/core/flow/monoflow/chat_mixin.py +1 -1
- lionagi/core/flow/monoflow/followup.py +11 -12
- lionagi/core/flow/polyflow/__init__.py +1 -1
- lionagi/core/generic/component.py +0 -2
- lionagi/core/generic/condition.py +1 -1
- lionagi/core/generic/edge.py +52 -0
- lionagi/core/mail/mail_manager.py +3 -2
- lionagi/core/session/session.py +1 -1
- lionagi/experimental/__init__.py +0 -0
- lionagi/experimental/directive/__init__.py +0 -0
- lionagi/experimental/directive/evaluator/__init__.py +0 -0
- lionagi/experimental/directive/evaluator/ast_evaluator.py +115 -0
- lionagi/experimental/directive/evaluator/base_evaluator.py +202 -0
- lionagi/experimental/directive/evaluator/sandbox_.py +14 -0
- lionagi/experimental/directive/evaluator/script_engine.py +83 -0
- lionagi/experimental/directive/parser/__init__.py +0 -0
- lionagi/experimental/directive/parser/base_parser.py +215 -0
- lionagi/experimental/directive/schema.py +36 -0
- lionagi/experimental/directive/template_/__init__.py +0 -0
- lionagi/experimental/directive/template_/base_template.py +63 -0
- lionagi/experimental/tool/__init__.py +0 -0
- lionagi/experimental/tool/function_calling.py +43 -0
- lionagi/experimental/tool/manual.py +66 -0
- lionagi/experimental/tool/schema.py +59 -0
- lionagi/experimental/tool/tool_manager.py +138 -0
- lionagi/experimental/tool/util.py +16 -0
- lionagi/experimental/work/__init__.py +0 -0
- lionagi/experimental/work/_logger.py +25 -0
- lionagi/experimental/work/exchange.py +0 -0
- lionagi/experimental/work/schema.py +30 -0
- lionagi/experimental/work/tests.py +72 -0
- lionagi/experimental/work/util.py +0 -0
- lionagi/experimental/work/work_function.py +89 -0
- lionagi/experimental/work/worker.py +12 -0
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
- lionagi/integrations/bridge/llamaindex_/get_index.py +294 -0
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +36 -0
- lionagi/integrations/config/oai_configs.py +1 -1
- lionagi/integrations/config/ollama_configs.py +1 -1
- lionagi/integrations/config/openrouter_configs.py +1 -1
- lionagi/integrations/storage/__init__.py +3 -0
- lionagi/integrations/storage/neo4j.py +673 -0
- lionagi/integrations/storage/storage_util.py +289 -0
- lionagi/integrations/storage/to_csv.py +63 -0
- lionagi/integrations/storage/to_excel.py +67 -0
- lionagi/libs/ln_knowledge_graph.py +405 -0
- lionagi/libs/ln_queue.py +101 -0
- lionagi/libs/ln_tokenizer.py +57 -0
- lionagi/libs/sys_util.py +1 -1
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +20 -0
- lionagi/lions/coder/base_prompts.py +22 -0
- lionagi/lions/coder/coder.py +121 -0
- lionagi/lions/coder/util.py +91 -0
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +191 -0
- lionagi/lions/researcher/data_source/google_.py +199 -0
- lionagi/lions/researcher/data_source/wiki_.py +96 -0
- lionagi/lions/researcher/data_source/yfinance_.py +21 -0
- lionagi/tests/libs/test_queue.py +67 -0
- lionagi/tests/test_core/test_branch.py +0 -1
- lionagi/version.py +1 -1
- {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/METADATA +1 -1
- {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/RECORD +83 -29
- {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/LICENSE +0 -0
- {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/WHEEL +0 -0
- {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/top_level.txt +0 -0
lionagi/integrations/bridge/llamaindex_/get_index.py
@@ -0,0 +1,294 @@
+# TODO: Refactor this code to use the new llama_index API
+
+# class BaseIndex:
+
+#     @staticmethod
+#     def _get_index(
+#         input_=None,
+#         # default to OpenAI
+#         llm=None,
+#         llm_provider=None,
+#         llm_kwargs={},
+#         service_context=None,
+#         service_context_kwargs={},
+#         index_type=None,
+#         index_kwargs={},
+#         rerank_=False,
+#         reranker_type=None,
+#         reranker=None,
+#         rerank_kwargs={},
+#         get_engine=False,
+#         engine_kwargs={},
+#         from_storage=False,
+#         storage_context=None,
+#         strorage_context_kwargs={},
+#         index_id=None,
+#         load_index_from_storage_kwargs={},
+#     ):
+#         """
+#         Creates and returns an index or query engine based on the provided parameters.
+
+#         Args:
+#             chunks: The input data to be indexed or queried.
+#             llm: An instance of a language model for indexing or querying.
+#             llm_provider: A function to provide an instance of a language model.
+#             llm_kwargs: Keyword arguments for configuring the language model.
+#             service_context: An instance of a service context.
+#             service_context_kwargs: Keyword arguments for configuring the service context.
+#             index_type: The type of index to create.
+#             index_kwargs: Keyword arguments for configuring the index.
+#             rerank_: Boolean flag indicating whether reranking should be applied.
+#             reranker_type: The type of reranker to use.
+#             reranker: An instance of a reranker.
+#             rerank_kwargs: Keyword arguments for configuring the reranker.
+#             get_engine: Boolean flag indicating whether to return a query engine.
+#             engine_kwargs: Keyword arguments for configuring the query engine.
+
+#         Returns:
+#             Index or Query Engine: Depending on the 'get_engine' flag, returns an index or query engine.
+
+#         Raises:
+#             Various exceptions if there are errors in creating the index or query engine.
+#         """
+
+#         if from_storage:
+#             from llama_index import StorageContext, load_index_from_storage
+
+#             storage_context = StorageContext.from_defaults(**strorage_context_kwargs)
+
+#             if index_id:
+#                 index = load_index_from_storage(
+#                     storage_context=storage_context,
+#                     index_id=index_id,
+#                     **load_index_from_storage_kwargs,
+#                 )
+#             else:
+#                 raise ValueError("Index ID is required for loading from storage.")
+
+#             if rerank_:
+#                 if not reranker:
+#                     if not reranker_type:
+#                         from llama_index.postprocessor import LLMRerank
+
+#                         reranker_type = LLMRerank
+#                     reranker = reranker_type(
+#                         service_context=service_context, **rerank_kwargs
+#                     )
+#                 engine_kwargs.update({"node_postprocessors": [reranker]})
+
+#             if get_engine:
+#                 return (index, index.as_query_engine(**engine_kwargs))
+#             return index
+
+#         if not llm:
+#             if llm_provider:
+#                 llm = llm_provider(**llm_kwargs)
+#             else:
+#                 from llama_index.llms import OpenAI
+
+#                 llm = OpenAI(**llm_kwargs)
+
+#         if not service_context:
+#             from llama_index import ServiceContext
+
+#             service_context = ServiceContext.from_defaults(
+#                 llm=llm, **service_context_kwargs
+#             )
+
+#         if not index_type:
+#             from llama_index import VectorStoreIndex
+
+#             index_type = VectorStoreIndex
+
+#         index = index_type(input_, service_context=service_context, **index_kwargs)
+
+#         if index_id:
+#             index.index_id = index_id
+
+#         if rerank_:
+#             if not reranker:
+#                 if not reranker_type:
+#                     from llama_index.postprocessor import LLMRerank
+
+#                     reranker_type = LLMRerank
+#                 reranker = reranker_type(
+#                     service_context=service_context, **rerank_kwargs
+#                 )
+#             engine_kwargs.update({"node_postprocessors": [reranker]})
+
+#         if get_engine:
+#             return (index, index.as_query_engine(**engine_kwargs))
+#         return index
+
+
+# class LlamaIndex:
+
+#     @staticmethod
+#     def kg_index(
+#         input_=None,
+#         # default to OpenAI
+#         llm=None,
+#         llm_provider=None,
+#         llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
+#         service_context=None,
+#         service_context_kwargs={},
+#         index_kwargs={"include_embeddings": True},
+#         rerank_=False,
+#         reranker_type=None,
+#         reranker=None,
+#         rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
+#         get_engine=False,
+#         engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
+#         kg_triplet_extract_fn=None,
+#         from_storage=False,
+#         storage_context=None,
+#         strorage_context_kwargs={},
+#         index_id=None,
+#         load_index_from_storage_kwargs={},
+#     ):
+#         """
+#         Creates and returns a KnowledgeGraphIndex based on the provided parameters.
+
+#         Args:
+#             chunks: The input data to be indexed.
+#             llm: An instance of a language model for indexing.
+#             llm_provider: A function to provide an instance of a language model.
+#             llm_kwargs: Keyword arguments for configuring the language model.
+#             service_context: An instance of a service context.
+#             service_context_kwargs: Keyword arguments for configuring the service context.
+#             index_kwargs: Keyword arguments for configuring the index.
+#             rerank_: Boolean flag indicating whether reranking should be applied.
+#             reranker_type: The type of reranker to use.
+#             reranker: An instance of a reranker.
+#             rerank_kwargs: Keyword arguments for configuring the reranker.
+#             get_engine: Boolean flag indicating whether to return a query engine.
+#             engine_kwargs: Keyword arguments for configuring the query engine.
+#             kg_triplet_extract_fn: Optional function for extracting KG triplets.
+
+#         Returns:
+#             KnowledgeGraphIndex or Query Engine: Depending on the 'get_engine' flag,
+#             returns a KnowledgeGraphIndex or query engine.
+
+#         Raises:
+#             Various exceptions if there are errors in creating the index or query engine.
+#         """
+#         from llama_index import KnowledgeGraphIndex
+
+#         index_type_ = ""
+#         if not from_storage:
+#             from llama_index.graph_stores import SimpleGraphStore
+#             from llama_index.storage.storage_context import StorageContext
+
+#             graph_store = SimpleGraphStore()
+#             if storage_context is None:
+#                 storage_context = StorageContext.from_defaults(
+#                     graph_store=graph_store, **strorage_context_kwargs
+#                 )
+#             index_kwargs.update({"storage_context": storage_context})
+#             index_type_ = KnowledgeGraphIndex.from_documents
+
+#         elif from_storage:
+#             index_type_ = KnowledgeGraphIndex
+
+#         if kg_triplet_extract_fn:
+#             index_kwargs.update({"kg_triplet_extract_fn": kg_triplet_extract_fn})
+
+#         if storage_context is None:
+#             from llama_index.graph_stores import SimpleGraphStore
+#             from llama_index.storage.storage_context import StorageContext
+
+#             storage_context = StorageContext.from_defaults(
+#                 graph_store=SimpleGraphStore(), **strorage_context_kwargs
+#             )
+
+#         return BaseIndex._get_index(
+#             input_=input_,
+#             llm=llm,
+#             llm_provider=llm_provider,
+#             llm_kwargs=llm_kwargs,
+#             service_context=service_context,
+#             service_context_kwargs=service_context_kwargs,
+#             index_type=index_type_,
+#             index_kwargs=index_kwargs,
+#             rerank_=rerank_,
+#             reranker_type=reranker_type,
+#             reranker=reranker,
+#             rerank_kwargs=rerank_kwargs,
+#             get_engine=get_engine,
+#             engine_kwargs=engine_kwargs,
+#             from_storage=from_storage,
+#             storage_context=storage_context,
+#             strorage_context_kwargs=strorage_context_kwargs,
+#             index_id=index_id,
+#             load_index_from_storage_kwargs=load_index_from_storage_kwargs,
+#         )
+
+#     @staticmethod
+#     def vector_index(
+#         input_=None,
+#         # default to OpenAI
+#         llm=None,
+#         llm_provider=None,
+#         llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
+#         service_context=None,
+#         service_context_kwargs={},
+#         index_kwargs={"include_embeddings": True},
+#         # default to LLMRerank
+#         rerank_=False,
+#         reranker_type=None,
+#         reranker=None,
+#         rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
+#         get_engine=False,
+#         engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
+#         from_storage=False,
+#         storage_context=None,
+#         strorage_context_kwargs={},
+#         index_id=None,
+#         load_index_from_storage_kwargs={},
+#     ):
+#         """
+#         Creates and returns a vector index or query engine based on the provided parameters.
+
+#         Args:
+#             chunks: The input data to be indexed or queried.
+#             llm: An instance of a language model for indexing or querying.
+#             llm_provider: A function to provide an instance of a language model.
+#             llm_kwargs: Keyword arguments for configuring the language model.
+#             service_context: An instance of a service context.
+#             service_context_kwargs: Keyword arguments for configuring the service context.
+#             index_kwargs: Keyword arguments for configuring the index.
+#             rerank_: Boolean flag indicating whether reranking should be applied.
+#             reranker_type: The type of reranker to use.
+#             reranker: An instance of a reranker.
+#             rerank_kwargs: Keyword arguments for configuring the reranker.
+#             get_engine: Boolean flag indicating whether to return a query engine.
+#             engine_kwargs: Keyword arguments for configuring the query engine.
+
+#         Returns:
+#             Vector Index or Query Engine: Depending on the 'get_engine' flag,
+#             returns a vector index or query engine.
+
+#         Raises:
+#             Various exceptions if there are errors in creating the index or query engine.
+#         """
+
+#         return BaseIndex._get_index(
+#             input_=input_,
+#             llm=llm,
+#             llm_provider=llm_provider,
+#             llm_kwargs=llm_kwargs,
+#             service_context=service_context,
+#             service_context_kwargs=service_context_kwargs,
+#             index_kwargs=index_kwargs,
+#             rerank_=rerank_,
+#             reranker_type=reranker_type,
+#             reranker=reranker,
+#             rerank_kwargs=rerank_kwargs,
+#             get_engine=get_engine,
+#             engine_kwargs=engine_kwargs,
+#             from_storage=from_storage,
+#             storage_context=storage_context,
+#             strorage_context_kwargs=strorage_context_kwargs,
+#             index_id=index_id,
+#             load_index_from_storage_kwargs=load_index_from_storage_kwargs,
+#         )
lionagi/integrations/bridge/llamaindex_/llama_pack.py
@@ -0,0 +1,227 @@
+class LlamaPack:
+
+    @staticmethod
+    def download(pack_name, pack_path):
+        try:
+            from llama_index.llama_pack import download_llama_pack
+
+            return download_llama_pack(pack_name, pack_path)
+        except Exception as e:
+            raise ImportError(f"Error in downloading llama pack: {e}")
+
+    @staticmethod
+    def build(pack_name, pack_path, args=[], **kwargs):
+        pack = LlamaPack.download(pack_name, pack_path)
+        return pack(*args, **kwargs)
+
+    @staticmethod
+    def stock_market_pack(pack_path="./stock_market_data_pack", args=[], **kwargs):
+        name_ = "StockMarketDataQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def embedded_table_pack(
+        pack_path="./embedded_tables_unstructured_pack", args=[], **kwargs
+    ):
+        name_ = "RecursiveRetrieverSmallToBigPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def rag_evaluator_pack(pack_path="./rag_evaluator_pack", args=[], **kwargs):
+        name_ = "RagEvaluatorPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def ollma_pack(pack_path="./ollama_pack", args=[], **kwargs):
+        name_ = "OllamaQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def llm_compiler_agent_pack(
+        pack_path="./llm_compiler_agent_pack", args=[], **kwargs
+    ):
+        name_ = "LLMCompilerAgentPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def resume_screener_pack(pack_path="./resume_screener_pack", args=[], **kwargs):
+        name_ = "ResumeScreenerPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def ragatouille_retriever_pack(pack_path="./ragatouille_pack", args=[], **kwargs):
+        name_ = "RAGatouilleRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def chain_of_table_pack(pack_path="./chain_of_table_pack", args=[], **kwargs):
+        name_ = "ChainOfTablePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def hybrid_fusion_retriever_pack(
+        pack_path="./hybrid_fusion_pack", args=[], **kwargs
+    ):
+        name_ = "HybridFusionRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def neo4j_query_engine_pack(pack_path="./neo4j_pack", args=[], **kwargs):
+        name_ = "Neo4jQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def llava_completion_pack(pack_path="./llava_pack", args=[], **kwargs):
+        name_ = "LlavaCompletionPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def sentence_window_retriever_pack(
+        pack_path="./sentence_window_retriever_pack", args=[], **kwargs
+    ):
+        name_ = "SentenceWindowRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def dense_x_retrieval_pack(pack_path="./dense_pack", args=[], **kwargs):
+        name_ = "DenseXRetrievalPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def zephyr_query_engine_pack(pack_path="./zephyr_pack", args=[], **kwargs):
+        name_ = "ZephyrQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def query_rewriting_retriever_pack(
+        pack_path="./query_rewriting_pack", args=[], **kwargs
+    ):
+        name_ = "QueryRewritingRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def fuzzy_citation_engine_pack(
+        pack_path="./fuzzy_citation_pack", args=[], **kwargs
+    ):
+        name_ = "FuzzyCitationEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def multidoc_auto_retriever_pack(
+        pack_path="./multidoc_autoretrieval_pack", args=[], **kwargs
+    ):
+        name_ = "MultiDocAutoRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def auto_merging_retriever_pack(
+        pack_path="./auto_merging_retriever_pack", args=[], **kwargs
+    ):
+        name_ = "AutoMergingRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def voyage_query_engine_pack(pack_path="./voyage_pack", args=[], **kwargs):
+        name_ = "VoyageQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def mix_self_consistency_pack(
+        pack_path="./mix_self_consistency_pack", args=[], **kwargs
+    ):
+        name_ = "MixSelfConsistencyPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def rag_fusion_pipeline_pack(
+        pack_path="./rag_fusion_pipeline_pack", args=[], **kwargs
+    ):
+        name_ = "RAGFusionPipelinePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def multi_document_agents_pack(
+        pack_path="./multi_doc_agents_pack", args=[], **kwargs
+    ):
+        name_ = "MultiDocumentAgentsPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def llama_guard_moderator_pack(pack_path="./llamaguard_pack", args=[], **kwargs):
+        name_ = "LlamaGuardModeratorPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def evaluator_benchmarker_pack(
+        pack_path="./eval_benchmark_pack", args=[], **kwargs
+    ):
+        name_ = "EvaluatorBenchmarkerPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def amazon_product_extraction_pack(
+        pack_path="./amazon_product_extraction_pack", args=[], **kwargs
+    ):
+        name_ = "AmazonProductExtractionPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def llama_dataset_metadata_pack(
+        pack_path="./llama_dataset_metadata_pack", args=[], **kwargs
+    ):
+        name_ = "LlamaDatasetMetadataPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def multi_tenancy_rag_pack(pack_path="./multitenancy_rag_pack", args=[], **kwargs):
+        name_ = "MultiTenancyRAGPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def gmail_openai_agent_pack(pack_path="./gmail_pack", args=[], **kwargs):
+        name_ = "GmailOpenAIAgentPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def snowflake_query_engine_pack(pack_path="./snowflake_pack", args=[], **kwargs):
+        name_ = "SnowflakeQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def agent_search_retriever_pack(pack_path="./agent_search_pack", args=[], **kwargs):
+        name_ = "AgentSearchRetrieverPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def vectara_rag_pack(pack_path="./vectara_rag_pack", args=[], **kwargs):
+        name_ = "VectaraRagPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def chroma_autoretrieval_pack(pack_path="./chroma_pack", args=[], **kwargs):
+        name_ = "ChromaAutoretrievalPack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def arize_phoenix_query_engine_pack(pack_path="./arize_pack", args=[], **kwargs):
+        name_ = "ArizePhoenixQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def redis_ingestion_pipeline_pack(
+        pack_path="./redis_ingestion_pack", args=[], **kwargs
+    ):
+        name_ = "RedisIngestionPipelinePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def nebula_graph_query_engine_pack(
+        pack_path="./nebulagraph_pack", args=[], **kwargs
+    ):
+        name_ = "NebulaGraphQueryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
+
+    @staticmethod
+    def weaviate_retry_engine_pack(pack_path="./weaviate_pack", args=[], **kwargs):
+        name_ = "WeaviateRetryEnginePack"
+        return LlamaPack.build(name_, pack_path, args, **kwargs)
lionagi/integrations/bridge/transformers_/__init__.py
File without changes
lionagi/integrations/bridge/transformers_/install_.py
@@ -0,0 +1,36 @@
+import subprocess
+from lionagi.libs import SysUtil
+
+
+def get_pytorch_install_command():
+    cpu_arch = SysUtil.get_cpu_architecture()
+
+    if cpu_arch == "apple_silicon":
+        return "pip install --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu"
+    else:
+        # Default CPU installation
+        return "pip install torch torchvision torchaudio"
+
+
+def install_pytorch():
+    command = get_pytorch_install_command()
+    try:
+        subprocess.run(command.split(), check=True)
+        print("PyTorch installed successfully.")
+    except subprocess.CalledProcessError as e:
+        print(f"Failed to install PyTorch: {e}")
+
+
+def install_transformers():
+    if not SysUtil.is_package_installed("torch"):
+        in_ = input(
+            "PyTorch is required for transformers. Would you like to install it now? (y/n): "
+        )
+        if in_ == "y":
+            install_pytorch()
+    if not SysUtil.is_package_installed("transformers"):
+        in_ = input(
+            "transformers is required. Would you like to install it now? (y/n): "
+        )
+        if in_ == "y":
+            SysUtil.install_import(package_name="transformers", import_name="pipeline")
lionagi/integrations/config/ollama_configs.py
@@ -1 +1 @@
-model = "
+model = "llama3"