reminix-llamaindex 0.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

.gitignore
@@ -0,0 +1,208 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ !.env.example
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/

LICENSE
@@ -0,0 +1,201 @@
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
+
+    9. Accepting Warranty or Additional Liability. While redistributing
+       the Work or Derivative Works thereof, You may choose to offer,
+       and charge a fee for, acceptance of support, warranty, indemnity,
+       or other liability obligations and/or rights consistent with this
+       License. However, in accepting such obligations, You may act only
+       on Your own behalf and on Your sole responsibility, not on behalf
+       of any other Contributor, and only if You agree to indemnify,
+       defend, and hold each Contributor harmless for any liability
+       incurred by, or claims asserted against, such Contributor by reason
+       of your accepting any such warranty or additional liability.
+
+    END OF TERMS AND CONDITIONS
+
+    APPENDIX: How to apply the Apache License to your work.
+
+       To apply the Apache License to your work, attach the following
+       boilerplate notice, with the fields enclosed by brackets "[]"
+       replaced with your own identifying information. (Don't include
+       the brackets!) The text should be enclosed in the appropriate
+       comment syntax for the file format. We also recommend that a
+       file or class name and description of purpose be included on the
+       same "printed page" as the copyright notice for easier
+       identification within third-party archives.
+
+    Copyright [yyyy] [name of copyright owner]
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.

PKG-INFO
@@ -0,0 +1,191 @@
+ Metadata-Version: 2.4
+ Name: reminix-llamaindex
+ Version: 0.0.4
+ Summary: Reminix adapter for LlamaIndex - serve agents as REST APIs
+ Project-URL: Homepage, https://reminix.com
+ Project-URL: Documentation, https://reminix.com/docs
+ Project-URL: Repository, https://github.com/reminix-ai/runtime-python
+ Project-URL: Changelog, https://github.com/reminix-ai/runtime-python/blob/main/CHANGELOG.md
+ Project-URL: Bug Tracker, https://github.com/reminix-ai/runtime-python/issues
+ Author-email: Reminix Team <team@reminix.com>
+ License-Expression: Apache-2.0
+ License-File: LICENSE
+ Keywords: agents,ai,llama-index,llamaindex,llm,rag,reminix
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.10
+ Requires-Dist: llama-index>=0.14.0
+ Requires-Dist: reminix-runtime~=0.0.4
+ Provides-Extra: dev
+ Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # reminix-llamaindex
+
+ Reminix Runtime adapter for [LlamaIndex](https://www.llamaindex.ai/). Serve LlamaIndex chat engines as a REST API.
+
+ > **Ready to go live?** [Deploy to Reminix Cloud](https://reminix.com/docs/deployment) for zero-config hosting, or [self-host](https://reminix.com/docs/deployment/self-hosting) on your own infrastructure.
+
+ ## Installation
+
+ ```bash
+ pip install reminix-llamaindex
+ ```
+
+ This will also install `reminix-runtime` as a dependency.
+
+ ## Quick Start
+
+ ```python
+ from llama_index.core.chat_engine import SimpleChatEngine
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap_and_serve
+
+ llm = OpenAI(model="gpt-4o")
+ engine = SimpleChatEngine.from_defaults(llm=llm)
+ wrap_and_serve(engine, name="my-chatbot", port=8080)
+ ```
+
+ For more flexibility (e.g., serving multiple agents), use `wrap` and `serve` separately:
+
+ ```python
+ from llama_index.core.chat_engine import SimpleChatEngine
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap
+ from reminix_runtime import serve
+
+ llm = OpenAI(model="gpt-4o")
+ engine = SimpleChatEngine.from_defaults(llm=llm)
+ agent = wrap(engine, name="my-chatbot")
+ serve([agent], port=8080)
+ ```
+
+ Your agent is now available at:
+ - `POST /agents/my-chatbot/invoke` - Stateless invocation
+ - `POST /agents/my-chatbot/chat` - Conversational chat
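
For example, with the Quick Start server above running locally, the invoke endpoint can be exercised from Python. This is a minimal sketch, assuming the third-party `requests` library is installed; the request and response shapes are the ones documented under "Endpoint Input/Output Formats" below:

```python
import requests

# Assumes the Quick Start server is running on localhost:8080
resp = requests.post(
    "http://localhost:8080/agents/my-chatbot/invoke",
    json={"input": {"query": "What is the capital of France?"}},
)
resp.raise_for_status()
print(resp.json()["output"])
```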
+
+ ## API Reference
+
+ ### `wrap_and_serve(engine, name, port, host)`
+
+ Wrap a LlamaIndex chat engine and serve it immediately. Combines `wrap` and `serve` for single-agent setups.
+
+ | Parameter | Type | Default | Description |
+ |-----------|------|---------|-------------|
+ | `engine` | `BaseChatEngine` | required | A LlamaIndex chat engine |
+ | `name` | `str` | `"llamaindex-agent"` | Name for the agent (used in URL path) |
+ | `port` | `int` | `8080` | Port to serve on |
+ | `host` | `str` | `"0.0.0.0"` | Host to bind to |
+
+ ### `wrap(engine, name)`
+
+ Wrap a LlamaIndex chat engine for use with Reminix Runtime. Use this with `serve` from `reminix_runtime` for multi-agent setups.
+
+ | Parameter | Type | Default | Description |
+ |-----------|------|---------|-------------|
+ | `engine` | `BaseChatEngine` | required | A LlamaIndex chat engine |
+ | `name` | `str` | `"llamaindex-agent"` | Name for the agent (used in URL path) |
+
+ **Returns:** `LlamaIndexAdapter` - A Reminix adapter instance
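
Because `wrap` only constructs an adapter, several engines can be wrapped under different names and served from a single process. A minimal sketch (the agent names below are illustrative):

```python
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.llms.openai import OpenAI
from reminix_llamaindex import wrap
from reminix_runtime import serve

llm = OpenAI(model="gpt-4o")

# Each adapter gets its own name, and therefore its own /agents/{name}/* routes
support = wrap(SimpleChatEngine.from_defaults(llm=llm), name="support-bot")
docs = wrap(SimpleChatEngine.from_defaults(llm=llm), name="docs-bot")

serve([support, docs], port=8080)
```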
+
+ ### Example with RAG
+
+ ```python
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap
+ from reminix_runtime import serve
+
+ # Load documents and create index
+ documents = SimpleDirectoryReader("./data").load_data()
+ index = VectorStoreIndex.from_documents(documents)
+
+ # Create a chat engine with the index
+ engine = index.as_chat_engine(llm=OpenAI(model="gpt-4o"))
+
+ # Wrap and serve
+ agent = wrap(engine, name="rag-chatbot")
+ serve([agent], port=8080)
+ ```
+
+ ## Endpoint Input/Output Formats
+
+ ### POST /agents/{name}/invoke
+
+ Stateless invocation for task-oriented operations.
+
+ **Request:**
+ ```json
+ {
+   "input": {
+     "query": "What is the capital of France?"
+   }
+ }
+ ```
+
+ Or with a `prompt` key:
+ ```json
+ {
+   "input": {
+     "prompt": "Summarize this text: ..."
+   }
+ }
+ ```
+
+ **Response:**
+ ```json
+ {
+   "output": "The capital of France is Paris."
+ }
+ ```
+
+ ### POST /agents/{name}/chat
+
+ Conversational chat. The adapter extracts the last user message from the conversation and sends it to the engine.
+
+ **Request:**
+ ```json
+ {
+   "messages": [
+     {"role": "user", "content": "What is the capital of France?"}
+   ]
+ }
+ ```
+
+ **Response:**
+ ```json
+ {
+   "output": "The capital of France is Paris.",
+   "messages": [
+     {"role": "user", "content": "What is the capital of France?"},
+     {"role": "assistant", "content": "The capital of France is Paris."}
+   ]
+ }
+ ```
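
Since the response echoes the full message list, a client can hold a multi-turn conversation by appending each new user turn to the previously returned messages. A sketch using the third-party `requests` library, with the server assumed local:

```python
import requests

url = "http://localhost:8080/agents/my-chatbot/chat"
messages = [{"role": "user", "content": "What is the capital of France?"}]

# First turn
data = requests.post(url, json={"messages": messages}).json()
print(data["output"])

# Second turn: reuse the returned history and append the next user message
messages = data["messages"] + [{"role": "user", "content": "And its population?"}]
data = requests.post(url, json={"messages": messages}).json()
print(data["output"])
```

Note that, per the adapter source, only the last user message is forwarded to the engine on each call; any longer-term conversation memory lives in the engine itself.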
+
+ ## Runtime Documentation
+
+ For information about the server, endpoints, request/response formats, and more, see the [`reminix-runtime`](https://pypi.org/project/reminix-runtime/) package.
+
+ ## Deployment
+
+ Ready to go live?
+
+ - **[Deploy to Reminix Cloud](https://reminix.com/docs/deployment)** - Zero-config cloud hosting
+ - **[Self-host](https://reminix.com/docs/deployment/self-hosting)** - Run on your own infrastructure
+
+ ## Links
+
+ - [GitHub Repository](https://github.com/reminix-ai/runtime-python)
+ - [LlamaIndex Documentation](https://docs.llamaindex.ai/)
+
+ ## License
+
+ Apache-2.0

README.md
@@ -0,0 +1,162 @@
+ # reminix-llamaindex
+
+ Reminix Runtime adapter for [LlamaIndex](https://www.llamaindex.ai/). Serve LlamaIndex chat engines as a REST API.
+
+ > **Ready to go live?** [Deploy to Reminix Cloud](https://reminix.com/docs/deployment) for zero-config hosting, or [self-host](https://reminix.com/docs/deployment/self-hosting) on your own infrastructure.
+
+ ## Installation
+
+ ```bash
+ pip install reminix-llamaindex
+ ```
+
+ This will also install `reminix-runtime` as a dependency.
+
+ ## Quick Start
+
+ ```python
+ from llama_index.core.chat_engine import SimpleChatEngine
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap_and_serve
+
+ llm = OpenAI(model="gpt-4o")
+ engine = SimpleChatEngine.from_defaults(llm=llm)
+ wrap_and_serve(engine, name="my-chatbot", port=8080)
+ ```
+
+ For more flexibility (e.g., serving multiple agents), use `wrap` and `serve` separately:
+
+ ```python
+ from llama_index.core.chat_engine import SimpleChatEngine
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap
+ from reminix_runtime import serve
+
+ llm = OpenAI(model="gpt-4o")
+ engine = SimpleChatEngine.from_defaults(llm=llm)
+ agent = wrap(engine, name="my-chatbot")
+ serve([agent], port=8080)
+ ```
+
+ Your agent is now available at:
+ - `POST /agents/my-chatbot/invoke` - Stateless invocation
+ - `POST /agents/my-chatbot/chat` - Conversational chat
+
+ ## API Reference
+
+ ### `wrap_and_serve(engine, name, port, host)`
+
+ Wrap a LlamaIndex chat engine and serve it immediately. Combines `wrap` and `serve` for single-agent setups.
+
+ | Parameter | Type | Default | Description |
+ |-----------|------|---------|-------------|
+ | `engine` | `BaseChatEngine` | required | A LlamaIndex chat engine |
+ | `name` | `str` | `"llamaindex-agent"` | Name for the agent (used in URL path) |
+ | `port` | `int` | `8080` | Port to serve on |
+ | `host` | `str` | `"0.0.0.0"` | Host to bind to |
+
+ ### `wrap(engine, name)`
+
+ Wrap a LlamaIndex chat engine for use with Reminix Runtime. Use this with `serve` from `reminix_runtime` for multi-agent setups.
+
+ | Parameter | Type | Default | Description |
+ |-----------|------|---------|-------------|
+ | `engine` | `BaseChatEngine` | required | A LlamaIndex chat engine |
+ | `name` | `str` | `"llamaindex-agent"` | Name for the agent (used in URL path) |
+
+ **Returns:** `LlamaIndexAdapter` - A Reminix adapter instance
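
The returned adapter can also be driven directly in-process, without starting a server, which is handy for tests. A sketch assuming `ChatRequest` accepts a list of role/content dicts, as in this package's own test suite:

```python
import asyncio

from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.llms.openai import OpenAI
from reminix_llamaindex import wrap
from reminix_runtime import ChatRequest

engine = SimpleChatEngine.from_defaults(llm=OpenAI(model="gpt-4o"))
adapter = wrap(engine, name="my-chatbot")


async def main() -> None:
    # Call the adapter directly instead of going through HTTP
    request = ChatRequest(messages=[{"role": "user", "content": "Hello!"}])
    response = await adapter.chat(request)
    print(response.output)


asyncio.run(main())
```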
+
+ ### Example with RAG
+
+ ```python
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+ from llama_index.llms.openai import OpenAI
+ from reminix_llamaindex import wrap
+ from reminix_runtime import serve
+
+ # Load documents and create index
+ documents = SimpleDirectoryReader("./data").load_data()
+ index = VectorStoreIndex.from_documents(documents)
+
+ # Create a chat engine with the index
+ engine = index.as_chat_engine(llm=OpenAI(model="gpt-4o"))
+
+ # Wrap and serve
+ agent = wrap(engine, name="rag-chatbot")
+ serve([agent], port=8080)
+ ```
+
+ ## Endpoint Input/Output Formats
+
+ ### POST /agents/{name}/invoke
+
+ Stateless invocation for task-oriented operations.
+
+ **Request:**
+ ```json
+ {
+   "input": {
+     "query": "What is the capital of France?"
+   }
+ }
+ ```
+
+ Or with a `prompt` key:
+ ```json
+ {
+   "input": {
+     "prompt": "Summarize this text: ..."
+   }
+ }
+ ```
+
+ **Response:**
+ ```json
+ {
+   "output": "The capital of France is Paris."
+ }
+ ```
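
Besides `query` and `prompt`, the adapter source also falls back to a `message` key and, as a last resort, stringifies the whole input object. A sketch of the same call from Python (using the third-party `requests` library, server assumed local):

```python
import requests

resp = requests.post(
    "http://localhost:8080/agents/my-chatbot/invoke",
    json={"input": {"message": "Hello there"}},  # 'message' is handled like 'query'
)
print(resp.json()["output"])
```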
+
+ ### POST /agents/{name}/chat
+
+ Conversational chat. The adapter extracts the last user message from the conversation and sends it to the engine.
+
+ **Request:**
+ ```json
+ {
+   "messages": [
+     {"role": "user", "content": "What is the capital of France?"}
+   ]
+ }
+ ```
+
+ **Response:**
+ ```json
+ {
+   "output": "The capital of France is Paris.",
+   "messages": [
+     {"role": "user", "content": "What is the capital of France?"},
+     {"role": "assistant", "content": "The capital of France is Paris."}
+   ]
+ }
+ ```
+
+ ## Runtime Documentation
+
+ For information about the server, endpoints, request/response formats, and more, see the [`reminix-runtime`](https://pypi.org/project/reminix-runtime/) package.
+
+ ## Deployment
+
+ Ready to go live?
+
+ - **[Deploy to Reminix Cloud](https://reminix.com/docs/deployment)** - Zero-config cloud hosting
+ - **[Self-host](https://reminix.com/docs/deployment/self-hosting)** - Run on your own infrastructure
+
+ ## Links
+
+ - [GitHub Repository](https://github.com/reminix-ai/runtime-python)
+ - [LlamaIndex Documentation](https://docs.llamaindex.ai/)
+
+ ## License
+
+ Apache-2.0

pyproject.toml
@@ -0,0 +1,52 @@
+ [project]
+ name = "reminix-llamaindex"
+ version = "0.0.4"
+ description = "Reminix adapter for LlamaIndex - serve agents as REST APIs"
+ readme = "README.md"
+ license = "Apache-2.0"
+ requires-python = ">=3.10"
+ authors = [{ name = "Reminix Team", email = "team@reminix.com" }]
+ keywords = ["ai", "agents", "llamaindex", "llama-index", "rag", "llm", "reminix"]
+ classifiers = [
+     "Development Status :: 4 - Beta",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: Apache Software License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ ]
+ dependencies = [
+     "reminix-runtime~=0.0.4",
+     "llama-index>=0.14.0",
+ ]
+
+ [project.urls]
+ Homepage = "https://reminix.com"
+ Documentation = "https://reminix.com/docs"
+ Repository = "https://github.com/reminix-ai/runtime-python"
+ Changelog = "https://github.com/reminix-ai/runtime-python/blob/main/CHANGELOG.md"
+ "Bug Tracker" = "https://github.com/reminix-ai/runtime-python/issues"
+
+ [project.optional-dependencies]
+ dev = [
+     "pytest>=8.0.0",
+     "pytest-asyncio>=0.24.0",
+ ]
+
+ [tool.pytest.ini_options]
+ asyncio_mode = "auto"
+ testpaths = ["tests"]
+ python_files = ["test_*.py"]
+ python_functions = ["test_*"]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/reminix_llamaindex"]
+
+ [tool.uv.sources]
+ reminix-runtime = { workspace = true }

src/reminix_llamaindex/__init__.py
@@ -0,0 +1,3 @@
+ from .adapter import LlamaIndexAdapter, wrap, wrap_and_serve
+
+ __all__ = ["LlamaIndexAdapter", "wrap", "wrap_and_serve"]

src/reminix_llamaindex/adapter.py
@@ -0,0 +1,211 @@
+ """LlamaIndex adapter for Reminix Runtime."""
+
+ import json
+ from collections.abc import AsyncIterator
+ from typing import Any, Protocol, runtime_checkable
+
+ from reminix_runtime import (
+     BaseAdapter,
+     ChatRequest,
+     ChatResponse,
+     InvokeRequest,
+     InvokeResponse,
+     Message,
+     serve,
+ )
+
+
+ @runtime_checkable
+ class ChatEngine(Protocol):
+     """Protocol for LlamaIndex chat engines."""
+
+     async def achat(self, message: str) -> Any:
+         """Async chat method."""
+         ...
+
+     async def astream_chat(self, message: str) -> Any:
+         """Async streaming chat method."""
+         ...
+
+
+ class LlamaIndexAdapter(BaseAdapter):
+     """Adapter for LlamaIndex chat engines."""
+
+     adapter_name = "llamaindex"
+
+     def __init__(self, engine: ChatEngine, name: str = "llamaindex-agent") -> None:
+         """Initialize the adapter.
+
+         Args:
+             engine: A LlamaIndex chat engine (e.g., SimpleChatEngine, ContextChatEngine).
+             name: Name for the agent.
+         """
+         self._engine = engine
+         self._name = name
+
+     @property
+     def name(self) -> str:
+         return self._name
+
+     def _get_last_user_message(self, messages: list[Message]) -> str:
+         """Get the last user message from the conversation."""
+         for message in reversed(messages):
+             if message.role == "user":
+                 return message.content or ""
+         # Fall back to the last message if no user message was found
+         return (messages[-1].content or "") if messages else ""
+
+     async def invoke(self, request: InvokeRequest) -> InvokeResponse:
+         """Handle an invoke request.
+
+         For task-oriented operations. Expects input with 'query' or 'prompt' key.
+
+         Args:
+             request: The invoke request with input data.
+
+         Returns:
+             The invoke response with the output.
+         """
+         # Extract query from input
+         if "query" in request.input:
+             query = request.input["query"]
+         elif "prompt" in request.input:
+             query = request.input["prompt"]
+         elif "message" in request.input:
+             query = request.input["message"]
+         else:
+             query = str(request.input)
+
+         # Call the chat engine
+         response = await self._engine.achat(query)
+
+         # Extract content from response
+         output = str(response.response) if hasattr(response, "response") else str(response)
+
+         return InvokeResponse(output=output)
+
+     async def chat(self, request: ChatRequest) -> ChatResponse:
+         """Handle a chat request.
+
+         For conversational interactions. Sends the last user message to the engine.
+
+         Args:
+             request: The chat request with messages.
+
+         Returns:
+             The chat response with output and messages.
+         """
+         # Get the last user message to send to the engine
+         message = self._get_last_user_message(request.messages)
+
+         # Call the chat engine
+         response = await self._engine.achat(message)
+
+         # Extract content from response
+         output = str(response.response) if hasattr(response, "response") else str(response)
+
+         # Build response messages (original + assistant response)
+         response_messages: list[dict[str, Any]] = [
+             {"role": m.role, "content": m.content} for m in request.messages
+         ]
+         response_messages.append({"role": "assistant", "content": output})
+
+         return ChatResponse(output=output, messages=response_messages)
+
+     async def invoke_stream(self, request: InvokeRequest) -> AsyncIterator[str]:
+         """Handle a streaming invoke request.
+
+         Args:
+             request: The invoke request with input data.
+
+         Yields:
+             JSON-encoded chunks from the stream.
+         """
+         # Extract query from input
+         if "query" in request.input:
+             query = request.input["query"]
+         elif "prompt" in request.input:
+             query = request.input["prompt"]
+         elif "message" in request.input:
+             query = request.input["message"]
+         else:
+             query = str(request.input)
+
+         # Stream from the chat engine
+         response = await self._engine.astream_chat(query)
+         async for token in response.async_response_gen():
+             yield json.dumps({"chunk": token})
+
+     async def chat_stream(self, request: ChatRequest) -> AsyncIterator[str]:
+         """Handle a streaming chat request.
+
+         Args:
+             request: The chat request with messages.
+
+         Yields:
+             JSON-encoded chunks from the stream.
+         """
+         # Get the last user message to send to the engine
+         message = self._get_last_user_message(request.messages)
+
+         # Stream from the chat engine
+         response = await self._engine.astream_chat(message)
+         async for token in response.async_response_gen():
+             yield json.dumps({"chunk": token})
+
+
+ def wrap(engine: ChatEngine, name: str = "llamaindex-agent") -> LlamaIndexAdapter:
+     """Wrap a LlamaIndex chat engine for use with Reminix Runtime.
+
+     Args:
+         engine: A LlamaIndex chat engine (e.g., SimpleChatEngine, ContextChatEngine).
+         name: Name for the agent.
+
+     Returns:
+         A LlamaIndexAdapter instance.
+
+     Example:
+         ```python
+         from llama_index.core.chat_engine import SimpleChatEngine
+         from llama_index.llms.openai import OpenAI
+         from reminix_llamaindex import wrap
+         from reminix_runtime import serve
+
+         llm = OpenAI(model="gpt-4")
+         engine = SimpleChatEngine.from_defaults(llm=llm)
+         agent = wrap(engine, name="my-agent")
+         serve([agent], port=8080)
+         ```
+     """
+     return LlamaIndexAdapter(engine, name=name)
+
+
+ def wrap_and_serve(
+     engine: ChatEngine,
+     name: str = "llamaindex-agent",
+     port: int = 8080,
+     host: str = "0.0.0.0",
+ ) -> None:
+     """Wrap a LlamaIndex chat engine and serve it immediately.
+
+     This is a convenience function that combines `wrap` and `serve` for single-agent setups.
+
+     Args:
+         engine: A LlamaIndex chat engine (e.g., SimpleChatEngine, ContextChatEngine).
+         name: Name for the agent.
+         port: Port to serve on.
+         host: Host to bind to.
+
+     Example:
+         ```python
+         from llama_index.core.chat_engine import SimpleChatEngine
+         from llama_index.llms.openai import OpenAI
+         from reminix_llamaindex import wrap_and_serve
+
+         llm = OpenAI(model="gpt-4")
+         engine = SimpleChatEngine.from_defaults(llm=llm)
+         wrap_and_serve(engine, name="my-agent", port=8080)
+         ```
+     """
+     agent = wrap(engine, name=name)
+     serve([agent], port=port, host=host)
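
Because `ChatEngine` above is a structural protocol, any object with matching `achat` and `astream_chat` coroutines can be wrapped, which also makes the streaming path easy to exercise in isolation. A self-contained sketch with a hypothetical stub engine (the stub mirrors the shape the adapter expects: `astream_chat` returns an object exposing `async_response_gen`):

```python
import asyncio
import json

from reminix_llamaindex import wrap
from reminix_runtime import ChatRequest


class StubStream:
    """Mimics the streaming response object the adapter consumes."""

    async def async_response_gen(self):
        for token in ("Hello", ", ", "world", "!"):
            yield token


class StubEngine:
    """Minimal stand-in satisfying the ChatEngine protocol."""

    async def achat(self, message: str) -> str:
        return "Hello, world!"

    async def astream_chat(self, message: str) -> StubStream:
        return StubStream()


async def main() -> None:
    adapter = wrap(StubEngine(), name="stub")
    request = ChatRequest(messages=[{"role": "user", "content": "Hi"}])
    # chat_stream yields JSON-encoded {"chunk": ...} strings, per the adapter above
    async for chunk in adapter.chat_stream(request):
        print(json.loads(chunk)["chunk"], end="")
    print()


asyncio.run(main())
```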

tests/__init__.py
@@ -0,0 +1 @@
+ """Tests for reminix-llamaindex."""

tests/test_adapter.py
@@ -0,0 +1,188 @@
+ """Tests for the LlamaIndex adapter."""
+
+ from unittest.mock import AsyncMock, MagicMock, patch
+
+ import pytest
+
+ from reminix_llamaindex import LlamaIndexAdapter, wrap, wrap_and_serve
+ from reminix_runtime import BaseAdapter, ChatRequest, InvokeRequest
+
+
+ class TestWrap:
+     """Tests for the wrap() function."""
+
+     def test_wrap_returns_adapter(self):
+         """wrap() should return a LlamaIndexAdapter."""
+         mock_engine = MagicMock()
+         mock_engine.achat = AsyncMock()
+         adapter = wrap(mock_engine)
+
+         assert isinstance(adapter, LlamaIndexAdapter)
+         assert isinstance(adapter, BaseAdapter)
+
+     def test_wrap_with_custom_name(self):
+         """wrap() should accept a custom name."""
+         mock_engine = MagicMock()
+         mock_engine.achat = AsyncMock()
+         adapter = wrap(mock_engine, name="my-custom-agent")
+
+         assert adapter.name == "my-custom-agent"
+
+     def test_wrap_default_name(self):
+         """wrap() should use default name if not provided."""
+         mock_engine = MagicMock()
+         mock_engine.achat = AsyncMock()
+         adapter = wrap(mock_engine)
+
+         assert adapter.name == "llamaindex-agent"
+
+
+ class TestLlamaIndexAdapterInvoke:
+     """Tests for the invoke() method."""
+
+     @pytest.mark.asyncio
+     async def test_invoke_calls_engine(self):
+         """invoke() should call the engine with query from input."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Hello from LlamaIndex!")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = InvokeRequest(input={"query": "What is AI?"})
+
+         response = await adapter.invoke(request)
+
+         mock_engine.achat.assert_called_once_with("What is AI?")
+
+     @pytest.mark.asyncio
+     async def test_invoke_returns_output(self):
+         """invoke() should return the output from the engine."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Hello from LlamaIndex!")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = InvokeRequest(input={"query": "Hi"})
+
+         response = await adapter.invoke(request)
+
+         assert response.output == "Hello from LlamaIndex!"
+
+     @pytest.mark.asyncio
+     async def test_invoke_with_prompt_input(self):
+         """invoke() should handle input with prompt key."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Response")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = InvokeRequest(input={"prompt": "Tell me about AI"})
+
+         response = await adapter.invoke(request)
+
+         mock_engine.achat.assert_called_once_with("Tell me about AI")
+
+     @pytest.mark.asyncio
+     async def test_invoke_with_message_input(self):
+         """invoke() should handle input with message key."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Response")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = InvokeRequest(input={"message": "Hello there"})
+
+         response = await adapter.invoke(request)
+
+         mock_engine.achat.assert_called_once_with("Hello there")
+
+
+ class TestLlamaIndexAdapterChat:
+     """Tests for the chat() method."""
+
+     @pytest.mark.asyncio
+     async def test_chat_calls_engine(self):
+         """chat() should call the engine with the last user message."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Hello!")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = ChatRequest(messages=[{"role": "user", "content": "Hi"}])
+
+         response = await adapter.chat(request)
+
+         mock_engine.achat.assert_called_once_with("Hi")
+
+     @pytest.mark.asyncio
+     async def test_chat_returns_output_and_messages(self):
+         """chat() should return output and messages."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Chat response")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = ChatRequest(messages=[{"role": "user", "content": "Hi"}])
+
+         response = await adapter.chat(request)
+
+         assert response.output == "Chat response"
+         assert len(response.messages) == 2
+         assert response.messages[-1]["role"] == "assistant"
+         assert response.messages[-1]["content"] == "Chat response"
+
+     @pytest.mark.asyncio
+     async def test_chat_uses_last_user_message(self):
+         """chat() should use the last user message in the conversation."""
+         mock_engine = MagicMock()
+         mock_response = MagicMock(response="Response")
+         mock_engine.achat = AsyncMock(return_value=mock_response)
+
+         adapter = wrap(mock_engine)
+         request = ChatRequest(
+             messages=[
+                 {"role": "user", "content": "First message"},
+                 {"role": "assistant", "content": "Response 1"},
+                 {"role": "user", "content": "Second message"},
+             ]
+         )
+
+         await adapter.chat(request)
+
+         mock_engine.achat.assert_called_once_with("Second message")
+
+
+ class TestWrapAndServe:
+     """Tests for the wrap_and_serve() function."""
+
+     def test_wrap_and_serve_is_callable(self):
+         """wrap_and_serve() should be callable."""
+         assert callable(wrap_and_serve)
+
+     @patch("reminix_llamaindex.adapter.serve")
+     def test_wrap_and_serve_calls_serve(self, mock_serve):
+         """wrap_and_serve() should call serve with wrapped adapter."""
+         mock_engine = MagicMock()
+         mock_engine.achat = AsyncMock()
+
+         wrap_and_serve(mock_engine, name="test-agent")
+
+         mock_serve.assert_called_once()
+         call_args = mock_serve.call_args
+         agents = call_args[0][0]
+         assert len(agents) == 1
+         assert isinstance(agents[0], LlamaIndexAdapter)
+         assert agents[0].name == "test-agent"
+
+     @patch("reminix_llamaindex.adapter.serve")
+     def test_wrap_and_serve_passes_serve_options(self, mock_serve):
+         """wrap_and_serve() should pass port and host to serve."""
+         mock_engine = MagicMock()
+         mock_engine.achat = AsyncMock()
+
+         wrap_and_serve(mock_engine, name="test-agent", port=3000, host="localhost")
+
+         mock_serve.assert_called_once()
+         call_kwargs = mock_serve.call_args[1]
+         assert call_kwargs["port"] == 3000
+         assert call_kwargs["host"] == "localhost"