lionagi 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

Files changed (103)
  1. lionagi/core/agent/base_agent.py +2 -3
  2. lionagi/core/branch/base.py +1 -1
  3. lionagi/core/branch/branch.py +2 -1
  4. lionagi/core/branch/flow_mixin.py +1 -1
  5. lionagi/core/branch/util.py +1 -1
  6. lionagi/core/execute/base_executor.py +1 -4
  7. lionagi/core/execute/branch_executor.py +66 -3
  8. lionagi/core/execute/instruction_map_executor.py +48 -0
  9. lionagi/core/execute/neo4j_executor.py +381 -0
  10. lionagi/core/execute/structure_executor.py +120 -4
  11. lionagi/core/flow/monoflow/ReAct.py +21 -19
  12. lionagi/core/flow/monoflow/chat_mixin.py +1 -1
  13. lionagi/core/flow/monoflow/followup.py +14 -13
  14. lionagi/core/flow/polyflow/__init__.py +1 -1
  15. lionagi/core/generic/component.py +197 -122
  16. lionagi/core/generic/condition.py +3 -1
  17. lionagi/core/generic/edge.py +77 -25
  18. lionagi/core/graph/graph.py +1 -1
  19. lionagi/core/mail/mail_manager.py +3 -2
  20. lionagi/core/session/session.py +1 -1
  21. lionagi/core/tool/tool_manager.py +10 -9
  22. lionagi/experimental/__init__.py +0 -0
  23. lionagi/experimental/directive/__init__.py +0 -0
  24. lionagi/experimental/directive/evaluator/__init__.py +0 -0
  25. lionagi/experimental/directive/evaluator/ast_evaluator.py +115 -0
  26. lionagi/experimental/directive/evaluator/base_evaluator.py +202 -0
  27. lionagi/experimental/directive/evaluator/sandbox_.py +14 -0
  28. lionagi/experimental/directive/evaluator/script_engine.py +83 -0
  29. lionagi/experimental/directive/parser/__init__.py +0 -0
  30. lionagi/experimental/directive/parser/base_parser.py +215 -0
  31. lionagi/experimental/directive/schema.py +36 -0
  32. lionagi/experimental/directive/template_/__init__.py +0 -0
  33. lionagi/experimental/directive/template_/base_template.py +63 -0
  34. lionagi/experimental/report/__init__.py +0 -0
  35. lionagi/experimental/report/form.py +64 -0
  36. lionagi/experimental/report/report.py +138 -0
  37. lionagi/experimental/report/util.py +47 -0
  38. lionagi/experimental/tool/__init__.py +0 -0
  39. lionagi/experimental/tool/function_calling.py +43 -0
  40. lionagi/experimental/tool/manual.py +66 -0
  41. lionagi/experimental/tool/schema.py +59 -0
  42. lionagi/experimental/tool/tool_manager.py +138 -0
  43. lionagi/experimental/tool/util.py +16 -0
  44. lionagi/experimental/validator/__init__.py +0 -0
  45. lionagi/experimental/validator/rule.py +139 -0
  46. lionagi/experimental/validator/validator.py +56 -0
  47. lionagi/experimental/work/__init__.py +10 -0
  48. lionagi/experimental/work/async_queue.py +54 -0
  49. lionagi/experimental/work/schema.py +73 -0
  50. lionagi/experimental/work/work_function.py +67 -0
  51. lionagi/experimental/work/worker.py +56 -0
  52. lionagi/experimental/work2/__init__.py +0 -0
  53. lionagi/experimental/work2/form.py +371 -0
  54. lionagi/experimental/work2/report.py +289 -0
  55. lionagi/experimental/work2/schema.py +30 -0
  56. lionagi/experimental/work2/tests.py +72 -0
  57. lionagi/experimental/work2/util.py +0 -0
  58. lionagi/experimental/work2/work.py +0 -0
  59. lionagi/experimental/work2/work_function.py +89 -0
  60. lionagi/experimental/work2/worker.py +12 -0
  61. lionagi/integrations/bridge/autogen_/__init__.py +0 -0
  62. lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
  63. lionagi/integrations/bridge/llamaindex_/get_index.py +294 -0
  64. lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
  65. lionagi/integrations/bridge/transformers_/__init__.py +0 -0
  66. lionagi/integrations/bridge/transformers_/install_.py +36 -0
  67. lionagi/integrations/config/oai_configs.py +1 -1
  68. lionagi/integrations/config/ollama_configs.py +1 -1
  69. lionagi/integrations/config/openrouter_configs.py +1 -1
  70. lionagi/integrations/storage/__init__.py +3 -0
  71. lionagi/integrations/storage/neo4j.py +673 -0
  72. lionagi/integrations/storage/storage_util.py +289 -0
  73. lionagi/integrations/storage/structure_excel.py +268 -0
  74. lionagi/integrations/storage/to_csv.py +63 -0
  75. lionagi/integrations/storage/to_excel.py +76 -0
  76. lionagi/libs/__init__.py +4 -0
  77. lionagi/libs/ln_knowledge_graph.py +405 -0
  78. lionagi/libs/ln_queue.py +101 -0
  79. lionagi/libs/ln_tokenizer.py +57 -0
  80. lionagi/libs/sys_util.py +1 -1
  81. lionagi/lions/__init__.py +0 -0
  82. lionagi/lions/coder/__init__.py +0 -0
  83. lionagi/lions/coder/add_feature.py +20 -0
  84. lionagi/lions/coder/base_prompts.py +22 -0
  85. lionagi/lions/coder/coder.py +121 -0
  86. lionagi/lions/coder/util.py +91 -0
  87. lionagi/lions/researcher/__init__.py +0 -0
  88. lionagi/lions/researcher/data_source/__init__.py +0 -0
  89. lionagi/lions/researcher/data_source/finhub_.py +191 -0
  90. lionagi/lions/researcher/data_source/google_.py +199 -0
  91. lionagi/lions/researcher/data_source/wiki_.py +96 -0
  92. lionagi/lions/researcher/data_source/yfinance_.py +21 -0
  93. lionagi/tests/libs/test_queue.py +67 -0
  94. lionagi/tests/test_core/generic/__init__.py +0 -0
  95. lionagi/tests/test_core/generic/test_component.py +89 -0
  96. lionagi/tests/test_core/test_branch.py +0 -1
  97. lionagi/version.py +1 -1
  98. {lionagi-0.1.0.dist-info → lionagi-0.1.2.dist-info}/METADATA +1 -1
  99. lionagi-0.1.2.dist-info/RECORD +206 -0
  100. lionagi-0.1.0.dist-info/RECORD +0 -136
  101. {lionagi-0.1.0.dist-info → lionagi-0.1.2.dist-info}/LICENSE +0 -0
  102. {lionagi-0.1.0.dist-info → lionagi-0.1.2.dist-info}/WHEEL +0 -0
  103. {lionagi-0.1.0.dist-info → lionagi-0.1.2.dist-info}/top_level.txt +0 -0
lionagi/experimental/work2/tests.py
@@ -0,0 +1,72 @@
+ from .schema import Work, WorkStatus
+ from ..work.worklog import WorkLog
+ from .work_function import WorkFunction
+
+ import unittest
+ from unittest.mock import AsyncMock, patch
+
+ from lionagi.libs import func_call
+
+
+ class TestWork(unittest.TestCase):
+     def setUp(self):
+         self.work = Work(form_id="123")
+
+     def test_initial_status(self):
+         """Test the initial status is set to PENDING."""
+         self.assertEqual(self.work.status, WorkStatus.PENDING)
+
+     def test_initial_deliverables(self):
+         """Test the initial deliverables are empty."""
+         self.assertEqual(self.work.deliverables, {})
+
+     def test_initial_dependencies(self):
+         """Test the initial dependencies are empty."""
+         self.assertEqual(self.work.dependencies, [])
+
+
+ class TestWorkLog(unittest.TestCase):
+     def setUp(self):
+         self.work_log = WorkLog()
+         self.work = Work(form_id="123")
+         self.work_log.append(self.work)
+
+     def test_append_work(self):
+         """Test appending work adds to logs and pending queue."""
+         self.assertIn("123", self.work_log.logs)
+         self.assertIn("123", self.work_log.pending)
+
+     def test_get_by_status(self):
+         """Test retrieving works by status."""
+         result = self.work_log.get_by_status(WorkStatus.PENDING)
+         self.assertEqual(result, {"123": self.work})
+
+
+ class TestWorkFunction(unittest.TestCase):
+     def setUp(self):
+         self.work_function = WorkFunction(function=AsyncMock(return_value="result"))
+         self.work = Work(form_id="123")
+         self.work_log = WorkLog()
+         self.work_log.append(self.work)
+         self.work_function.worklog = self.work_log
+
+     @patch("asyncio.sleep", new_callable=AsyncMock)
+     async def test_execute(self, mocked_sleep):
+         """Test executing work changes its status and handles results."""
+         with patch.object(func_call, "rcall", new_callable=AsyncMock) as mock_rcall:
+             mock_rcall.return_value = "completed"
+             await self.work_function.execute()
+             self.assertEqual(self.work.status, WorkStatus.COMPLETED)
+             self.assertNotIn("123", self.work_function.worklog.pending)
+
+     @patch("asyncio.sleep", new_callable=AsyncMock)
+     async def test_execute_failure(self, mocked_sleep):
+         """Test handling failure during work execution."""
+         with patch.object(func_call, "rcall", side_effect=Exception("Error")):
+             await self.work_function.execute()
+             self.assertEqual(self.work.status, WorkStatus.FAILED)
+             self.assertIn("123", self.work_function.worklog.errored)
+
+
+ if __name__ == "__main__":
+     unittest.main()
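One caveat on the two execute tests above: unittest.TestCase never awaits `async def` test methods, so as written they pass without exercising the code. A minimal self-contained sketch of the fix, assuming Python 3.8+ (class and method names here are illustrative, not from this diff):

import unittest
from unittest.mock import AsyncMock


class TestAsyncExecution(unittest.IsolatedAsyncioTestCase):
    # IsolatedAsyncioTestCase runs async tests on a real event loop;
    # plain TestCase would leave the coroutine un-awaited.
    async def test_mocked_coroutine_is_awaited(self):
        fn = AsyncMock(return_value="completed")
        self.assertEqual(await fn(), "completed")
        fn.assert_awaited_once()


if __name__ == "__main__":
    unittest.main()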
lionagi/experimental/work2/util.py (file without changes)
lionagi/experimental/work2/work.py (file without changes)
lionagi/experimental/work2/work_function.py
@@ -0,0 +1,89 @@
+ import asyncio
+ from typing import Any, Callable, Dict, List
+ from pydantic import Field
+ from functools import wraps
+ from lionagi import logging as _logging
+ from lionagi.libs import func_call
+ from lionagi.core.generic import BaseComponent
+
+ from .schema import Work, WorkStatus
+ from ..work.worklog import WorkLog
+ from .worker import Worker
+
+
+ class WorkFunction(BaseComponent):
+     """Work function management and execution."""
+
+     function: Callable
+     args: List[Any] = Field(default_factory=list)
+     kwargs: Dict[str, Any] = Field(default_factory=dict)
+     retry_kwargs: Dict[str, Any] = Field(default_factory=dict)
+     worklog: WorkLog = Field(default_factory=WorkLog)
+     instruction: str = Field(
+         default="", description="Instruction for the work function"
+     )
+     refresh_time: float = Field(
+         default=0.5, description="Time to wait before checking for pending work"
+     )
+
+     @property
+     def name(self):
+         """Get the name of the work function."""
+         return self.function.__name__
+
+     async def execute(self):
+         """Execute pending work items."""
+         while self.worklog.pending:
+             work_id = self.worklog.pending.popleft()
+             work = self.worklog.logs[work_id]
+             if work.status == WorkStatus.PENDING:
+                 try:
+                     await func_call.rcall(self._execute, work, **work.retry_kwargs)
+                 except Exception as e:
+                     work.status = WorkStatus.FAILED
+                     _logging.error(f"Work {work.id_} failed with error: {e}")
+                     self.worklog.errored.append(work.id_)
+             else:
+                 _logging.warning(
+                     f"Work {work.id_} is in {work.status} state "
+                     "and cannot be executed."
+                 )
+             await asyncio.sleep(self.refresh_time)
+
+     async def _execute(self, work: Work):
+         """Execute a single work item."""
+         work.status = WorkStatus.IN_PROGRESS
+         result = await self.function(*self.args, **self.kwargs)
+         work.deliverables = result
+         work.status = WorkStatus.COMPLETED
+         return result
+
+
+ def workfunc(func):
+
+     @wraps(func)
+     async def wrapper(self: Worker, *args, **kwargs):
+         # Retrieve the worker instance ('self')
+         if not hasattr(self, "work_functions"):
+             self.work_functions = {}
+
+         if func.__name__ not in self.work_functions:
+             # Create WorkFunction with the function and its docstring as instruction
+             self.work_functions[func.__name__] = WorkFunction(
+                 function=func,
+                 instruction=func.__doc__,
+                 args=args,
+                 kwargs=kwargs,
+                 retry_kwargs=kwargs.pop("retry_kwargs", {}),
+             )
+
+         # Retrieve the existing WorkFunction
+         work_function: WorkFunction = self.work_functions[func.__name__]
+         # Update args and kwargs for this call
+         work_function.args = args
+         work_function.kwargs = kwargs
+
+         # Execute the function using WorkFunction's managed execution process
+         return await work_function.execute()
+
+     return wrapper
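For orientation, a hedged sketch of how the `workfunc` decorator above is meant to be applied. The Worker subclass and method are hypothetical, and the module paths are inferred from the file list earlier in this diff:

# Hypothetical usage sketch; MyWorker and draft_summary are illustrative,
# and the import paths are inferred from the file list in this diff.
from lionagi.experimental.work2.worker import Worker
from lionagi.experimental.work2.work_function import workfunc


class MyWorker(Worker):
    @workfunc
    async def draft_summary(self, text: str) -> str:
        """Summarize the given text."""
        return text[:100]


# Each call routes through the cached WorkFunction's execute() loop:
# result = await MyWorker().draft_summary("long document ...")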
lionagi/experimental/work2/worker.py
@@ -0,0 +1,12 @@
+ from abc import ABC
+ from pydantic import Field
+ from lionagi.core.generic import BaseComponent
+
+
+ class Worker(BaseComponent, ABC):
+     form_templates: dict = Field(
+         default={}, description="The form templates of the worker"
+     )
+     work_functions: dict = Field(
+         default={}, description="The work functions of the worker"
+     )
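A side note on the field defaults above: pydantic copies mutable defaults per instance, so `Field(default={})` is safe here (unlike in stdlib dataclasses), but `default_factory=dict` is the more idiomatic spelling. A sketch of the equivalent declaration (illustrative, not in the diff):

from pydantic import BaseModel, Field


# Equivalent, idiomatic form: default_factory builds a fresh dict per
# instance, instead of relying on pydantic copying a shared default.
class WorkerSketch(BaseModel):
    form_templates: dict = Field(default_factory=dict)
    work_functions: dict = Field(default_factory=dict)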
lionagi/integrations/bridge/autogen_/__init__.py (file without changes)
lionagi/integrations/bridge/autogen_/autogen_.py
@@ -0,0 +1,124 @@
+ from typing import Dict, Union
+
+
+ def get_ipython_user_proxy():
+
+     try:
+         from lionagi.libs import SysUtil
+
+         SysUtil.check_import("autogen", pip_name="pyautogen")
+
+         import autogen
+         from IPython import get_ipython
+     except Exception as e:
+         raise ImportError(f"Please install autogen and IPython. {e}")
+
+     class IPythonUserProxyAgent(autogen.UserProxyAgent):
+
+         def __init__(self, name: str, **kwargs):
+             super().__init__(name, **kwargs)
+             self._ipython = get_ipython()
+
+         def generate_init_message(self, *args, **kwargs) -> Union[str, Dict]:
+             return (
+                 super().generate_init_message(*args, **kwargs)
+                 + """If you suggest code, the code will be executed in IPython."""
+             )
+
+         def run_code(self, code, **kwargs):
+             result = self._ipython.run_cell("%%capture --no-display cap\n" + code)
+             log = self._ipython.ev("cap.stdout")
+             log += self._ipython.ev("cap.stderr")
+             if result.result is not None:
+                 log += str(result.result)
+             exitcode = 0 if result.success else 1
+             if result.error_before_exec is not None:
+                 log += f"\n{result.error_before_exec}"
+                 exitcode = 1
+             if result.error_in_exec is not None:
+                 log += f"\n{result.error_in_exec}"
+                 exitcode = 1
+             return exitcode, log, None
+
+     return IPythonUserProxyAgent
+
+
+ def get_autogen_coder(
+     llm_config=None,
+     code_execution_config=None,
+     kernal="python",
+     config_list=None,
+     max_consecutive_auto_reply=15,
+     temperature=0,
+     cache_seed=42,
+     env_="local",
+     assistant_instruction=None,
+ ):
+     assistant = ""
+     try:
+         from lionagi.libs import SysUtil
+
+         SysUtil.check_import("autogen", pip_name="pyautogen")
+
+         import autogen
+         from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
+     except Exception as e:
+         raise ImportError(f"Please install autogen. {e}")
+
+     if env_ == "local":
+         assistant = autogen.AssistantAgent(
+             name="assistant",
+             llm_config=llm_config
+             or {
+                 "cache_seed": cache_seed,
+                 "config_list": config_list,
+                 "temperature": temperature,
+             },
+         )
+
+     elif env_ == "oai_assistant":
+         assistant = GPTAssistantAgent(
+             name="Coder Assistant",
+             llm_config={
+                 "tools": [{"type": "code_interpreter"}],
+                 "config_list": config_list,
+             },
+             instructions=assistant_instruction,
+         )
+
+     if kernal == "python":
+         user_proxy = autogen.UserProxyAgent(
+             name="user_proxy",
+             human_input_mode="NEVER",
+             max_consecutive_auto_reply=max_consecutive_auto_reply,
+             is_termination_msg=lambda x: x.get("content", "")
+             .rstrip()
+             .endswith("TERMINATE"),
+             code_execution_config=code_execution_config
+             or {
+                 "work_dir": "coding",
+                 "use_docker": False,
+             },
+         )
+         return user_proxy, assistant
+
+     elif kernal == "ipython":
+         user_proxy = get_ipython_user_proxy(
+             "ipython_user_proxy",
+             human_input_mode="NEVER",
+             max_consecutive_auto_reply=max_consecutive_auto_reply,
+             is_termination_msg=lambda x: x.get("content", "")
+             .rstrip()
+             .endswith("TERMINATE")
+             or x.get("content", "").rstrip().endswith('"TERMINATE".'),
+         )
+         return user_proxy, assistant
+
+ # # Sample Usage Pattern
+ # context = "def my_function():\n    pass\n"
+ # task1 = "I need help with the following code:\n"
+ # task2 = "Please write a function that returns the sum of two numbers."
+
+ # user_proxy, assistant = get_autogen_coder()
+ # user_proxy.initiate_chat(assistant, message=task1+context)
+ # user_proxy.send(recipient=assistant, message=task2)
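Note that `get_ipython_user_proxy()` is defined with no parameters and returns the `IPythonUserProxyAgent` class itself, yet the `ipython` branch above calls it with agent kwargs; a caller today would need to instantiate the returned class instead. A hedged sketch of that workaround (not part of this diff):

# Workaround sketch: the factory returns a class, so build the agent
# explicitly rather than passing kwargs to the factory call.
IPythonUserProxyAgent = get_ipython_user_proxy()
user_proxy = IPythonUserProxyAgent(
    "ipython_user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=15,
)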
lionagi/integrations/bridge/llamaindex_/get_index.py
@@ -0,0 +1,294 @@
+ # TODO: Refactor this code to use the new llama_index API
+
+ # class BaseIndex:
+
+ #     @staticmethod
+ #     def _get_index(
+ #         input_=None,
+ #         # default to OpenAI
+ #         llm=None,
+ #         llm_provider=None,
+ #         llm_kwargs={},
+ #         service_context=None,
+ #         service_context_kwargs={},
+ #         index_type=None,
+ #         index_kwargs={},
+ #         rerank_=False,
+ #         reranker_type=None,
+ #         reranker=None,
+ #         rerank_kwargs={},
+ #         get_engine=False,
+ #         engine_kwargs={},
+ #         from_storage=False,
+ #         storage_context=None,
+ #         strorage_context_kwargs={},
+ #         index_id=None,
+ #         load_index_from_storage_kwargs={},
+ #     ):
+ #         """
+ #         Creates and returns an index or query engine based on the provided parameters.
+
+ #         Args:
+ #             chunks: The input data to be indexed or queried.
+ #             llm: An instance of a language model for indexing or querying.
+ #             llm_provider: A function to provide an instance of a language model.
+ #             llm_kwargs: Keyword arguments for configuring the language model.
+ #             service_context: An instance of a service context.
+ #             service_context_kwargs: Keyword arguments for configuring the service context.
+ #             index_type: The type of index to create.
+ #             index_kwargs: Keyword arguments for configuring the index.
+ #             rerank_: Boolean flag indicating whether reranking should be applied.
+ #             reranker_type: The type of reranker to use.
+ #             reranker: An instance of a reranker.
+ #             rerank_kwargs: Keyword arguments for configuring the reranker.
+ #             get_engine: Boolean flag indicating whether to return a query engine.
+ #             engine_kwargs: Keyword arguments for configuring the query engine.
+
+ #         Returns:
+ #             Index or Query Engine: Depending on the 'get_engine' flag, returns an index or query engine.
+
+ #         Raises:
+ #             Various exceptions if there are errors in creating the index or query engine.
+ #         """
+
+ #         if from_storage:
+ #             from llama_index import StorageContext, load_index_from_storage
+
+ #             storage_context = StorageContext.from_defaults(**strorage_context_kwargs)
+
+ #             if index_id:
+ #                 index = load_index_from_storage(
+ #                     storage_context=storage_context,
+ #                     index_id=index_id,
+ #                     **load_index_from_storage_kwargs,
+ #                 )
+ #             else:
+ #                 raise ValueError("Index ID is required for loading from storage.")
+
+ #             if rerank_:
+ #                 if not reranker:
+ #                     if not reranker_type:
+ #                         from llama_index.postprocessor import LLMRerank
+
+ #                         reranker_type = LLMRerank
+ #                     reranker = reranker_type(
+ #                         service_context=service_context, **rerank_kwargs
+ #                     )
+ #                 engine_kwargs.update({"node_postprocessors": [reranker]})
+
+ #             if get_engine:
+ #                 return (index, index.as_query_engine(**engine_kwargs))
+ #             return index
+
+ #         if not llm:
+ #             if llm_provider:
+ #                 llm = llm_provider(**llm_kwargs)
+ #             else:
+ #                 from llama_index.llms import OpenAI
+
+ #                 llm = OpenAI(**llm_kwargs)
+
+ #         if not service_context:
+ #             from llama_index import ServiceContext
+
+ #             service_context = ServiceContext.from_defaults(
+ #                 llm=llm, **service_context_kwargs
+ #             )
+
+ #         if not index_type:
+ #             from llama_index import VectorStoreIndex
+
+ #             index_type = VectorStoreIndex
+
+ #         index = index_type(input_, service_context=service_context, **index_kwargs)
+
+ #         if index_id:
+ #             index.index_id = index_id
+
+ #         if rerank_:
+ #             if not reranker:
+ #                 if not reranker_type:
+ #                     from llama_index.postprocessor import LLMRerank
+
+ #                     reranker_type = LLMRerank
+ #                 reranker = reranker_type(
+ #                     service_context=service_context, **rerank_kwargs
+ #                 )
+ #             engine_kwargs.update({"node_postprocessors": [reranker]})
+
+ #         if get_engine:
+ #             return (index, index.as_query_engine(**engine_kwargs))
+ #         return index
+
+
+ # class LlamaIndex:
+
+ #     @staticmethod
+ #     def kg_index(
+ #         input_=None,
+ #         # default to OpenAI
+ #         llm=None,
+ #         llm_provider=None,
+ #         llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
+ #         service_context=None,
+ #         service_context_kwargs={},
+ #         index_kwargs={"include_embeddings": True},
+ #         rerank_=False,
+ #         reranker_type=None,
+ #         reranker=None,
+ #         rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
+ #         get_engine=False,
+ #         engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
+ #         kg_triplet_extract_fn=None,
+ #         from_storage=False,
+ #         storage_context=None,
+ #         strorage_context_kwargs={},
+ #         index_id=None,
+ #         load_index_from_storage_kwargs={},
+ #     ):
+ #         """
+ #         Creates and returns a KnowledgeGraphIndex based on the provided parameters.
+
+ #         Args:
+ #             chunks: The input data to be indexed.
+ #             llm: An instance of a language model for indexing.
+ #             llm_provider: A function to provide an instance of a language model.
+ #             llm_kwargs: Keyword arguments for configuring the language model.
+ #             service_context: An instance of a service context.
+ #             service_context_kwargs: Keyword arguments for configuring the service context.
+ #             index_kwargs: Keyword arguments for configuring the index.
+ #             rerank_: Boolean flag indicating whether reranking should be applied.
+ #             reranker_type: The type of reranker to use.
+ #             reranker: An instance of a reranker.
+ #             rerank_kwargs: Keyword arguments for configuring the reranker.
+ #             get_engine: Boolean flag indicating whether to return a query engine.
+ #             engine_kwargs: Keyword arguments for configuring the query engine.
+ #             kg_triplet_extract_fn: Optional function for extracting KG triplets.
+
+ #         Returns:
+ #             KnowledgeGraphIndex or Query Engine: Depending on the 'get_engine' flag,
+ #             returns a KnowledgeGraphIndex or query engine.
+
+ #         Raises:
+ #             Various exceptions if there are errors in creating the index or query engine.
+ #         """
+ #         from llama_index import KnowledgeGraphIndex
+
+ #         index_type_ = ""
+ #         if not from_storage:
+ #             from llama_index.graph_stores import SimpleGraphStore
+ #             from llama_index.storage.storage_context import StorageContext
+
+ #             graph_store = SimpleGraphStore()
+ #             if storage_context is None:
+ #                 storage_context = StorageContext.from_defaults(
+ #                     graph_store=graph_store, **strorage_context_kwargs
+ #                 )
+ #             index_kwargs.update({"storage_context": storage_context})
+ #             index_type_ = KnowledgeGraphIndex.from_documents
+
+ #         elif from_storage:
+ #             index_type_ = KnowledgeGraphIndex
+
+ #         if kg_triplet_extract_fn:
+ #             index_kwargs.update({"kg_triplet_extract_fn": kg_triplet_extract_fn})
+
+ #         if storage_context is None:
+ #             from llama_index.graph_stores import SimpleGraphStore
+ #             from llama_index.storage.storage_context import StorageContext
+
+ #             storage_context = StorageContext.from_defaults(
+ #                 graph_store=SimpleGraphStore(), **strorage_context_kwargs
+ #             )
+
+ #         return BaseIndex._get_index(
+ #             input_=input_,
+ #             llm=llm,
+ #             llm_provider=llm_provider,
+ #             llm_kwargs=llm_kwargs,
+ #             service_context=service_context,
+ #             service_context_kwargs=service_context_kwargs,
+ #             index_type=index_type_,
+ #             index_kwargs=index_kwargs,
+ #             rerank_=rerank_,
+ #             reranker_type=reranker_type,
+ #             reranker=reranker,
+ #             rerank_kwargs=rerank_kwargs,
+ #             get_engine=get_engine,
+ #             engine_kwargs=engine_kwargs,
+ #             from_storage=from_storage,
+ #             storage_context=storage_context,
+ #             strorage_context_kwargs=strorage_context_kwargs,
+ #             index_id=index_id,
+ #             load_index_from_storage_kwargs=load_index_from_storage_kwargs,
+ #         )
+
+ #     @staticmethod
+ #     def vector_index(
+ #         input_=None,
+ #         # default to OpenAI
+ #         llm=None,
+ #         llm_provider=None,
+ #         llm_kwargs={"temperature": 0.1, "model": "gpt-4-1106-preview"},
+ #         service_context=None,
+ #         service_context_kwargs={},
+ #         index_kwargs={"include_embeddings": True},
+ #         # default to LLMRerank
+ #         rerank_=False,
+ #         reranker_type=None,
+ #         reranker=None,
+ #         rerank_kwargs={"choice_batch_size": 5, "top_n": 3},
+ #         get_engine=False,
+ #         engine_kwargs={"similarity_top_k": 3, "response_mode": "tree_summarize"},
+ #         from_storage=False,
+ #         storage_context=None,
+ #         strorage_context_kwargs={},
+ #         index_id=None,
+ #         load_index_from_storage_kwargs={},
+ #     ):
+ #         """
+ #         Creates and returns a vector index or query engine based on the provided parameters.
+
+ #         Args:
+ #             chunks: The input data to be indexed or queried.
+ #             llm: An instance of a language model for indexing or querying.
+ #             llm_provider: A function to provide an instance of a language model.
+ #             llm_kwargs: Keyword arguments for configuring the language model.
+ #             service_context: An instance of a service context.
+ #             service_context_kwargs: Keyword arguments for configuring the service context.
+ #             index_kwargs: Keyword arguments for configuring the index.
+ #             rerank_: Boolean flag indicating whether reranking should be applied.
+ #             reranker_type: The type of reranker to use.
+ #             reranker: An instance of a reranker.
+ #             rerank_kwargs: Keyword arguments for configuring the reranker.
+ #             get_engine: Boolean flag indicating whether to return a query engine.
+ #             engine_kwargs: Keyword arguments for configuring the query engine.
+
+ #         Returns:
+ #             Vector Index or Query Engine: Depending on the 'get_engine' flag,
+ #             returns a vector index or query engine.
+
+ #         Raises:
+ #             Various exceptions if there are errors in creating the index or query engine.
+ #         """
+
+ #         return BaseIndex._get_index(
+ #             input_=input_,
+ #             llm=llm,
+ #             llm_provider=llm_provider,
+ #             llm_kwargs=llm_kwargs,
+ #             service_context=service_context,
+ #             service_context_kwargs=service_context_kwargs,
+ #             index_kwargs=index_kwargs,
+ #             rerank_=rerank_,
+ #             reranker_type=reranker_type,
+ #             reranker=reranker,
+ #             rerank_kwargs=rerank_kwargs,
+ #             get_engine=get_engine,
+ #             engine_kwargs=engine_kwargs,
+ #             from_storage=from_storage,
+ #             storage_context=storage_context,
+ #             strorage_context_kwargs=strorage_context_kwargs,
+ #             index_id=index_id,
+ #             load_index_from_storage_kwargs=load_index_from_storage_kwargs,
+ #         )
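The TODO at the top of this file refers to the post-0.10 reorganization of llama_index, in which `ServiceContext` was replaced by the global `Settings` object and the core classes moved under `llama_index.core`. A minimal sketch of what the refactor could look like, assuming llama-index >= 0.10 with the llama-index-llms-openai package installed (illustrative, not part of this diff):

# Hedged sketch of the refactor the TODO asks for; assumes llama-index >= 0.10.
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

# ServiceContext is gone: the LLM is configured once, globally, on Settings.
Settings.llm = OpenAI(temperature=0.1, model="gpt-4-1106-preview")

# Build a vector index and query engine with the same defaults the old
# vector_index() helper used for its engine_kwargs.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(
    similarity_top_k=3, response_mode="tree_summarize"
)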