reminix_langgraph-0.0.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reminix_langgraph-0.0.1/.gitignore +208 -0
- reminix_langgraph-0.0.1/PKG-INFO +130 -0
- reminix_langgraph-0.0.1/README.md +116 -0
- reminix_langgraph-0.0.1/pyproject.toml +34 -0
- reminix_langgraph-0.0.1/src/reminix_langgraph/__init__.py +3 -0
- reminix_langgraph-0.0.1/src/reminix_langgraph/adapter.py +223 -0
- reminix_langgraph-0.0.1/src/reminix_langgraph/py.typed +0 -0
- reminix_langgraph-0.0.1/tests/__init__.py +1 -0
- reminix_langgraph-0.0.1/tests/test_adapter.py +153 -0
reminix_langgraph-0.0.1/.gitignore
@@ -0,0 +1,208 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
#poetry.toml

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
#pdm.lock
#pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
#pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
!.env.example
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Cursor
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
# refer to https://docs.cursor.com/context/ignore-files
.cursorignore
.cursorindexingignore

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/
reminix_langgraph-0.0.1/PKG-INFO
@@ -0,0 +1,130 @@
Metadata-Version: 2.4
Name: reminix-langgraph
Version: 0.0.1
Summary: Reminix adapter for LangGraph
License-Expression: Apache-2.0
Requires-Python: >=3.10
Requires-Dist: langgraph>=1.0.0
Requires-Dist: reminix-runtime~=0.0.1
Provides-Extra: dev
Requires-Dist: langchain-core>=1.2.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
Requires-Dist: pytest>=8.0.0; extra == 'dev'
Description-Content-Type: text/markdown

# reminix-langgraph

Reminix Runtime adapter for [LangGraph](https://langchain-ai.github.io/langgraph/). Deploy any LangGraph agent as a REST API.

## Installation

```bash
pip install reminix-langgraph
```

This will also install `reminix-runtime` as a dependency.

## Quick Start

```python
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from reminix_langgraph import wrap
from reminix_runtime import serve

# Create a LangGraph agent
llm = ChatOpenAI(model="gpt-4o")
graph = create_react_agent(llm, tools=[])

# Wrap it with the Reminix adapter
agent = wrap(graph, name="my-agent")

# Serve it as a REST API
serve([agent], port=8080)
```

Your agent is now available at:
- `POST /agents/my-agent/invoke` - Stateless invocation
- `POST /agents/my-agent/chat` - Conversational chat

## API Reference

### `wrap(graph, name)`

Wrap a LangGraph compiled graph for use with Reminix Runtime.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `graph` | `CompiledGraph` | required | A LangGraph compiled graph |
| `name` | `str` | `"langgraph-agent"` | Name for the agent (used in URL path) |

**Returns:** `LangGraphAdapter` - A Reminix adapter instance

### How It Works

LangGraph uses a state-based approach. The adapter:
1. Converts incoming messages to LangChain message format
2. Invokes the graph with `{"messages": [...]}`
3. Extracts the last AI message from the response
4. Returns it in the Reminix response format

## Endpoint Input/Output Formats

### POST /agents/{name}/invoke

Stateless invocation. Input is passed directly to the graph.

**Request:**
```json
{
  "input": {
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }
}
```

**Response:**
```json
{
  "output": "Hello! How can I help you today?"
}
```

### POST /agents/{name}/chat

Conversational chat with message history.

**Request:**
```json
{
  "messages": [
    {"role": "user", "content": "What is the capital of France?"}
  ]
}
```

**Response:**
```json
{
  "output": "The capital of France is Paris.",
  "messages": [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "The capital of France is Paris."}
  ]
}
```

## Runtime Documentation

For information about the server, endpoints, request/response formats, and more, see the [`reminix-runtime`](https://pypi.org/project/reminix-runtime/) package.

## Links

- [GitHub Repository](https://github.com/reminix-ai/runtime-python)
- [LangGraph Documentation](https://langchain-ai.github.io/langgraph/)

## License

Apache-2.0
reminix_langgraph-0.0.1/README.md
@@ -0,0 +1,116 @@
# reminix-langgraph

Reminix Runtime adapter for [LangGraph](https://langchain-ai.github.io/langgraph/). Deploy any LangGraph agent as a REST API.

## Installation

```bash
pip install reminix-langgraph
```

This will also install `reminix-runtime` as a dependency.

## Quick Start

```python
from langgraph.prebuilt import create_react_agent
from langchain_openai import ChatOpenAI
from reminix_langgraph import wrap
from reminix_runtime import serve

# Create a LangGraph agent
llm = ChatOpenAI(model="gpt-4o")
graph = create_react_agent(llm, tools=[])

# Wrap it with the Reminix adapter
agent = wrap(graph, name="my-agent")

# Serve it as a REST API
serve([agent], port=8080)
```

Your agent is now available at:
- `POST /agents/my-agent/invoke` - Stateless invocation
- `POST /agents/my-agent/chat` - Conversational chat

## API Reference

### `wrap(graph, name)`

Wrap a LangGraph compiled graph for use with Reminix Runtime.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `graph` | `CompiledGraph` | required | A LangGraph compiled graph |
| `name` | `str` | `"langgraph-agent"` | Name for the agent (used in URL path) |

**Returns:** `LangGraphAdapter` - A Reminix adapter instance

### How It Works

LangGraph uses a state-based approach. The adapter:
1. Converts incoming messages to LangChain message format
2. Invokes the graph with `{"messages": [...]}`
3. Extracts the last AI message from the response
4. Returns it in the Reminix response format
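Because `wrap()` returns an ordinary adapter object, these four steps can also be exercised in-process, without starting the server. A minimal sketch, assuming the `graph` built in the Quick Start above (`ChatRequest` comes from `reminix-runtime`, as in this package's own tests):

```python
import asyncio

from reminix_langgraph import wrap
from reminix_runtime import ChatRequest

adapter = wrap(graph, name="my-agent")  # `graph` as built in the Quick Start

async def main() -> None:
    # Steps 1-2: the adapter converts these dicts to LangChain messages
    # and invokes the graph with {"messages": [...]}.
    request = ChatRequest(messages=[{"role": "user", "content": "Hello!"}])
    # Steps 3-4: the reply is the content of the last AI message.
    response = await adapter.chat(request)
    print(response.output)

asyncio.run(main())
```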
## Endpoint Input/Output Formats

### POST /agents/{name}/invoke

Stateless invocation. Input is passed directly to the graph.

**Request:**
```json
{
  "input": {
    "messages": [
      {"role": "user", "content": "Hello!"}
    ]
  }
}
```

**Response:**
```json
{
  "output": "Hello! How can I help you today?"
}
```
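With the Quick Start server running, this endpoint can be exercised from any HTTP client. A minimal sketch using only the Python standard library, assuming the server is listening on localhost:8080 as in the Quick Start:

```python
import json
import urllib.request

# Body matches the documented invoke request format above.
body = json.dumps(
    {"input": {"messages": [{"role": "user", "content": "Hello!"}]}}
).encode("utf-8")

req = urllib.request.Request(
    "http://localhost:8080/agents/my-agent/invoke",
    data=body,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["output"])
```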
### POST /agents/{name}/chat

Conversational chat with message history.

**Request:**
```json
{
  "messages": [
    {"role": "user", "content": "What is the capital of France?"}
  ]
}
```

**Response:**
```json
{
  "output": "The capital of France is Paris.",
  "messages": [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "The capital of France is Paris."}
  ]
}
```
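The `messages` array in the response is the full transcript, so a client can continue the conversation by appending the next user turn and sending it back. A minimal sketch, under the same localhost:8080 assumption as above (the `chat` helper and the follow-up question are illustrative):

```python
import json
import urllib.request

def chat(messages: list[dict]) -> dict:
    """POST a message list to the chat endpoint and return the parsed JSON."""
    req = urllib.request.Request(
        "http://localhost:8080/agents/my-agent/chat",
        data=json.dumps({"messages": messages}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

history = [{"role": "user", "content": "What is the capital of France?"}]
reply = chat(history)
print(reply["output"])  # "The capital of France is Paris."

# Carry the transcript forward for the next turn.
history = reply["messages"] + [{"role": "user", "content": "And of Spain?"}]
print(chat(history)["output"])
```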
## Runtime Documentation

For information about the server, endpoints, request/response formats, and more, see the [`reminix-runtime`](https://pypi.org/project/reminix-runtime/) package.

## Links

- [GitHub Repository](https://github.com/reminix-ai/runtime-python)
- [LangGraph Documentation](https://langchain-ai.github.io/langgraph/)

## License

Apache-2.0
reminix_langgraph-0.0.1/pyproject.toml
@@ -0,0 +1,34 @@
[project]
name = "reminix-langgraph"
version = "0.0.1"
description = "Reminix adapter for LangGraph"
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.10"
dependencies = [
    "reminix-runtime~=0.0.1",
    "langgraph>=1.0.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=8.0.0",
    "pytest-asyncio>=0.24.0",
    "langchain-core>=1.2.0",
]

[tool.pytest.ini_options]
asyncio_mode = "auto"
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/reminix_langgraph"]

[tool.uv.sources]
reminix-runtime = { workspace = true }
reminix_langgraph-0.0.1/src/reminix_langgraph/adapter.py
@@ -0,0 +1,223 @@
"""LangGraph adapter for Reminix Runtime."""

import json
from collections.abc import AsyncIterator
from typing import Any

from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)

from reminix_runtime import (
    BaseAdapter,
    ChatRequest,
    ChatResponse,
    InvokeRequest,
    InvokeResponse,
    Message,
)


class LangGraphAdapter(BaseAdapter):
    """Adapter for LangGraph compiled graphs."""

    adapter_name = "langgraph"

    def __init__(self, graph: Any, name: str = "langgraph-agent") -> None:
        """Initialize the adapter.

        Args:
            graph: A LangGraph compiled graph.
            name: Name for the agent.
        """
        self._graph = graph
        self._name = name

    @property
    def name(self) -> str:
        return self._name

    def _to_langchain_message(self, message: Message) -> BaseMessage:
        """Convert a Reminix message to a LangChain message."""
        role = message.role
        content = message.content or ""

        if role == "user":
            return HumanMessage(content=content)
        elif role == "assistant":
            return AIMessage(content=content)
        elif role == "system":
            return SystemMessage(content=content)
        elif role == "tool":
            tool_call_id = getattr(message, "tool_call_id", None) or "unknown"
            return ToolMessage(content=content, tool_call_id=tool_call_id)
        else:
            return HumanMessage(content=content)

    def _to_reminix_message(self, message: BaseMessage) -> dict[str, Any]:
        """Convert a LangChain message to a Reminix message dict."""
        if isinstance(message, HumanMessage):
            role = "user"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, SystemMessage):
            role = "system"
        elif isinstance(message, ToolMessage):
            role = "tool"
        else:
            role = "assistant"

        content = message.content if isinstance(message.content, str) else str(message.content)
        return {"role": role, "content": content}

    def _get_last_ai_content(self, messages: list[BaseMessage]) -> str:
        """Extract content from the last AI message."""
        for message in reversed(messages):
            if isinstance(message, AIMessage):
                return message.content if isinstance(message.content, str) else str(message.content)
        return ""

    async def invoke(self, request: InvokeRequest) -> InvokeResponse:
        """Handle an invoke request.

        For task-oriented operations. Passes the input directly to the graph.

        Args:
            request: The invoke request with input data.

        Returns:
            The invoke response with the output.
        """
        # Pass input directly to the graph
        result = await self._graph.ainvoke(request.input)

        # Extract output from result
        if isinstance(result, dict) and "messages" in result:
            messages = result.get("messages", [])
            output = self._get_last_ai_content(messages)
        elif isinstance(result, dict):
            output = result
        else:
            output = str(result)

        return InvokeResponse(output=output)

    async def chat(self, request: ChatRequest) -> ChatResponse:
        """Handle a chat request.

        For conversational interactions. Converts messages to LangChain format
        and invokes the graph with the state dict format.

        Args:
            request: The chat request with messages.

        Returns:
            The chat response with output and messages.
        """
        # Convert messages to LangChain format
        lc_messages = [self._to_langchain_message(m) for m in request.messages]

        # Call the graph with state dict format
        result = await self._graph.ainvoke({"messages": lc_messages})

        # Extract messages from result
        result_messages: list[BaseMessage] = result.get("messages", [])

        # Get content from the last AI message
        output = self._get_last_ai_content(result_messages)

        # Convert all messages back to Reminix format
        response_messages = [self._to_reminix_message(m) for m in result_messages]

        return ChatResponse(output=output, messages=response_messages)

    async def invoke_stream(self, request: InvokeRequest) -> AsyncIterator[str]:
        """Handle a streaming invoke request.

        Args:
            request: The invoke request with input data.

        Yields:
            JSON-encoded chunks from the stream.
        """
        async for chunk in self._graph.astream(request.input):
            # LangGraph streams dicts with node outputs
            if isinstance(chunk, dict):
                for _node_name, node_output in chunk.items():
                    if isinstance(node_output, dict) and "messages" in node_output:
                        for msg in node_output["messages"]:
                            if isinstance(msg, (AIMessage, AIMessageChunk)):
                                content = (
                                    msg.content
                                    if isinstance(msg.content, str)
                                    else str(msg.content)
                                )
                                if content:
                                    yield json.dumps({"chunk": content})
                    else:
                        yield json.dumps({"chunk": json.dumps(node_output)})
            else:
                yield json.dumps({"chunk": str(chunk)})

    async def chat_stream(self, request: ChatRequest) -> AsyncIterator[str]:
        """Handle a streaming chat request.

        Args:
            request: The chat request with messages.

        Yields:
            JSON-encoded chunks from the stream.
        """
        # Convert messages to LangChain format
        lc_messages = [self._to_langchain_message(m) for m in request.messages]

        # Stream from the graph
        async for chunk in self._graph.astream({"messages": lc_messages}):
            # LangGraph streams dicts with node outputs
            if isinstance(chunk, dict):
                for _node_name, node_output in chunk.items():
                    if isinstance(node_output, dict) and "messages" in node_output:
                        for msg in node_output["messages"]:
                            if isinstance(msg, (AIMessage, AIMessageChunk)):
                                content = (
                                    msg.content
                                    if isinstance(msg.content, str)
                                    else str(msg.content)
                                )
                                if content:
                                    yield json.dumps({"chunk": content})
                    else:
                        yield json.dumps({"chunk": json.dumps(node_output)})
            else:
                yield json.dumps({"chunk": str(chunk)})


def wrap(graph: Any, name: str = "langgraph-agent") -> LangGraphAdapter:
    """Wrap a LangGraph compiled graph for use with Reminix Runtime.

    Args:
        graph: A LangGraph compiled graph.
        name: Name for the agent.

    Returns:
        A LangGraphAdapter instance.

    Example:
        ```python
        from langgraph.prebuilt import create_react_agent
        from langchain_openai import ChatOpenAI
        from reminix_langgraph import wrap
        from reminix_runtime import serve

        llm = ChatOpenAI(model="gpt-4")
        graph = create_react_agent(llm, tools=[])
        agent = wrap(graph, name="my-agent")
        serve([agent], port=8080)
        ```
    """
    return LangGraphAdapter(graph, name=name)
reminix_langgraph-0.0.1/src/reminix_langgraph/py.typed
File without changes (empty marker file).
reminix_langgraph-0.0.1/tests/__init__.py
@@ -0,0 +1 @@
"""Tests for reminix-langgraph."""
reminix_langgraph-0.0.1/tests/test_adapter.py
@@ -0,0 +1,153 @@
"""Tests for the LangGraph adapter."""

from unittest.mock import AsyncMock, MagicMock

import pytest
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

from reminix_langgraph import LangGraphAdapter, wrap
from reminix_runtime import BaseAdapter, ChatRequest, InvokeRequest


class TestWrap:
    """Tests for the wrap() function."""

    def test_wrap_returns_adapter(self):
        """wrap() should return a LangGraphAdapter."""
        mock_graph = MagicMock()
        adapter = wrap(mock_graph)

        assert isinstance(adapter, LangGraphAdapter)
        assert isinstance(adapter, BaseAdapter)

    def test_wrap_with_custom_name(self):
        """wrap() should accept a custom name."""
        mock_graph = MagicMock()
        adapter = wrap(mock_graph, name="my-custom-agent")

        assert adapter.name == "my-custom-agent"

    def test_wrap_default_name(self):
        """wrap() should use default name if not provided."""
        mock_graph = MagicMock()
        adapter = wrap(mock_graph)

        assert adapter.name == "langgraph-agent"


class TestLangGraphAdapterInvoke:
    """Tests for the invoke() method."""

    @pytest.mark.asyncio
    async def test_invoke_calls_graph(self):
        """invoke() should call the underlying graph with the input."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(return_value={"messages": [AIMessage(content="Hello!")]})

        adapter = wrap(mock_graph)
        request = InvokeRequest(input={"query": "What is AI?"})

        response = await adapter.invoke(request)

        mock_graph.ainvoke.assert_called_once_with({"query": "What is AI?"})

    @pytest.mark.asyncio
    async def test_invoke_returns_output_from_messages(self):
        """invoke() should extract output from messages in the result."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(
            return_value={
                "messages": [HumanMessage(content="Hello"), AIMessage(content="Hi there!")]
            }
        )

        adapter = wrap(mock_graph)
        request = InvokeRequest(input={"messages": []})

        response = await adapter.invoke(request)

        assert response.output == "Hi there!"

    @pytest.mark.asyncio
    async def test_invoke_handles_dict_result(self):
        """invoke() should handle dict results without messages."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(return_value={"result": "success"})

        adapter = wrap(mock_graph)
        request = InvokeRequest(input={"task": "compute"})

        response = await adapter.invoke(request)

        assert response.output == {"result": "success"}


class TestLangGraphAdapterChat:
    """Tests for the chat() method."""

    @pytest.mark.asyncio
    async def test_chat_calls_graph_with_state_dict(self):
        """chat() should call the graph with state dict format."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(return_value={"messages": [AIMessage(content="Hello!")]})

        adapter = wrap(mock_graph)
        request = ChatRequest(messages=[{"role": "user", "content": "Hi"}])

        response = await adapter.chat(request)

        # Should be called with {"messages": [...]}
        call_args = mock_graph.ainvoke.call_args[0][0]
        assert "messages" in call_args
        assert len(call_args["messages"]) == 1
        assert isinstance(call_args["messages"][0], HumanMessage)

    @pytest.mark.asyncio
    async def test_chat_returns_output_and_messages(self):
        """chat() should return output and all messages from the graph."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(
            return_value={
                "messages": [
                    HumanMessage(content="Hi"),
                    AIMessage(content="Hello! How can I help?"),
                ]
            }
        )

        adapter = wrap(mock_graph)
        request = ChatRequest(messages=[{"role": "user", "content": "Hi"}])

        response = await adapter.chat(request)

        assert response.output == "Hello! How can I help?"
        assert len(response.messages) == 2
        assert response.messages[-1]["role"] == "assistant"

    @pytest.mark.asyncio
    async def test_chat_converts_messages_correctly(self):
        """chat() should convert messages to/from LangChain format."""
        mock_graph = MagicMock()
        mock_graph.ainvoke = AsyncMock(
            return_value={
                "messages": [
                    SystemMessage(content="You are helpful"),
                    HumanMessage(content="Hello"),
                    AIMessage(content="Hi!"),
                ]
            }
        )

        adapter = wrap(mock_graph)
        request = ChatRequest(
            messages=[
                {"role": "system", "content": "You are helpful"},
                {"role": "user", "content": "Hello"},
            ]
        )

        response = await adapter.chat(request)

        assert response.messages[0]["role"] == "system"
        assert response.messages[1]["role"] == "user"
        assert response.messages[2]["role"] == "assistant"