gohumanloop 0.0.6__tar.gz → 0.0.7__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {gohumanloop-0.0.6/gohumanloop.egg-info → gohumanloop-0.0.7}/PKG-INFO +44 -5
- gohumanloop-0.0.6/PKG-INFO → gohumanloop-0.0.7/README.md +41 -27
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/adapters/__init__.py +2 -2
- gohumanloop-0.0.6/gohumanloop/adapters/langgraph_adapter.py → gohumanloop-0.0.7/gohumanloop/adapters/base_adapter.py +128 -267
- gohumanloop-0.0.7/gohumanloop/adapters/langgraph_adapter.py +344 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/core/interface.py +16 -5
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/core/manager.py +43 -1
- gohumanloop-0.0.6/README.md → gohumanloop-0.0.7/gohumanloop.egg-info/PKG-INFO +66 -4
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop.egg-info/SOURCES.txt +1 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop.egg-info/requires.txt +3 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/pyproject.toml +4 -1
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/LICENSE +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/__main__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/cli/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/cli/main.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/core/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/manager/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/manager/ghl_manager.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/models/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/models/api_model.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/models/glh_model.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/api_provider.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/base.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/email_provider.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/ghl_provider.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/providers/terminal_provider.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/utils/__init__.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/utils/context_formatter.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/utils/threadsafedict.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/utils/utils.py +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop.egg-info/dependency_links.txt +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop.egg-info/entry_points.txt +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop.egg-info/top_level.txt +0 -0
- {gohumanloop-0.0.6 → gohumanloop-0.0.7}/setup.cfg +0 -0
{gohumanloop-0.0.6/gohumanloop.egg-info → gohumanloop-0.0.7}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: gohumanloop
-Version: 0.0.6
+Version: 0.0.7
 Summary: Perfecting AI workflows with human intelligence
 Author-email: gohumanloop authors <baird0917@163.com>
 Project-URL: repository, https://github.com/ptonlix/gohumanloop
@@ -19,6 +19,8 @@ Requires-Dist: langgraph>=0.3.30; extra == "langgraph"
 Provides-Extra: apiservices
 Requires-Dist: fastapi>=0.115.12; extra == "apiservices"
 Requires-Dist: uvicorn>=0.34.2; extra == "apiservices"
+Provides-Extra: agentops
+Requires-Dist: agentops>=0.4.12; extra == "agentops"
 Dynamic: license-file

 <div align="center">
@@ -61,17 +63,15 @@ To get started, check out the following example or jump straight into one of the

 - 🦜⛓️ [LangGraph](./examples/langgraph/)

-###
+### Installation

 **GoHumanLoop** currently supports `Python`.

-- Installation
-
 ```shell
 pip install gohumanloop
 ```

-
+### Example

 The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.

@@ -95,7 +95,9 @@ from langgraph.prebuilt import ToolNode, tools_condition

 from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command

+# Please replace with your Deepseek API Key from https://platform.deepseek.com/usage
 os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
+# Please replace with your Tavily API Key from https://app.tavily.com/home
 os.environ["TAVILY_API_KEY"] = "tvly-xxx"

 llm = init_chat_model("deepseek:deepseek-chat")
@@ -169,6 +171,43 @@ for event in events:

 ```

+- Deployment & Test
+
+Run the above code with the following steps:
+
+```shell
+# 1.Initialize environment
+uv init gohumanloop-example
+cd gohumanloop-example
+uv venv .venv --python=3.10
+
+# 2.Copy the above code to main.py
+
+# 3.Deploy and test
+uv pip install langchain
+uv pip install langchain_tavily
+uv pip install langgraph
+uv pip install langchain-deepseek
+uv pip install gohumanloop
+
+python main.py
+
+```
+
+- Interaction Demo
+
+
+
+Perform `human-in-the-loop` interaction by entering:
+
+> We, the experts are here to help! We'd recommend you check out LangGraph to build your agent.It's much more reliable and extensible than simple autonomous agents.
+
+
+
+🚀🚀🚀 Completed successfully ~
+
+➡️ Check out more examples in the [Examples Directory](./examples/) and we look foward to your contributions!
+
 ## 🎵 Why GoHumanloop?

 ### Human-in-the-loop
gohumanloop-0.0.6/PKG-INFO → gohumanloop-0.0.7/README.md

@@ -1,26 +1,3 @@
-Metadata-Version: 2.4
-Name: gohumanloop
-Version: 0.0.6
-Summary: Perfecting AI workflows with human intelligence
-Author-email: gohumanloop authors <baird0917@163.com>
-Project-URL: repository, https://github.com/ptonlix/gohumanloop
-Requires-Python: >=3.10
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: aiohttp>=3.11.16
-Requires-Dist: click>=8.1.8
-Requires-Dist: dotenv>=0.9.9
-Requires-Dist: pydantic>=2.11.3
-Requires-Dist: tomli>=2.2.1
-Provides-Extra: email
-Requires-Dist: imapclient>=3.0.1; extra == "email"
-Provides-Extra: langgraph
-Requires-Dist: langgraph>=0.3.30; extra == "langgraph"
-Provides-Extra: apiservices
-Requires-Dist: fastapi>=0.115.12; extra == "apiservices"
-Requires-Dist: uvicorn>=0.34.2; extra == "apiservices"
-Dynamic: license-file
-
 <div align="center">

 
@@ -61,17 +38,15 @@ To get started, check out the following example or jump straight into one of the

 - 🦜⛓️ [LangGraph](./examples/langgraph/)

-###
+### Installation

 **GoHumanLoop** currently supports `Python`.

-- Installation
-
 ```shell
 pip install gohumanloop
 ```

-
+### Example

 The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.

@@ -95,7 +70,9 @@ from langgraph.prebuilt import ToolNode, tools_condition

 from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command

+# Please replace with your Deepseek API Key from https://platform.deepseek.com/usage
 os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
+# Please replace with your Tavily API Key from https://app.tavily.com/home
 os.environ["TAVILY_API_KEY"] = "tvly-xxx"

 llm = init_chat_model("deepseek:deepseek-chat")
@@ -169,6 +146,43 @@ for event in events:

 ```

+- Deployment & Test
+
+Run the above code with the following steps:
+
+```shell
+# 1.Initialize environment
+uv init gohumanloop-example
+cd gohumanloop-example
+uv venv .venv --python=3.10
+
+# 2.Copy the above code to main.py
+
+# 3.Deploy and test
+uv pip install langchain
+uv pip install langchain_tavily
+uv pip install langgraph
+uv pip install langchain-deepseek
+uv pip install gohumanloop
+
+python main.py
+
+```
+
+- Interaction Demo
+
+
+
+Perform `human-in-the-loop` interaction by entering:
+
+> We, the experts are here to help! We'd recommend you check out LangGraph to build your agent.It's much more reliable and extensible than simple autonomous agents.
+
+
+
+🚀🚀🚀 Completed successfully ~
+
+➡️ Check out more examples in the [Examples Directory](./examples/) and we look foward to your contributions!
+
 ## 🎵 Why GoHumanloop?

 ### Human-in-the-loop
{gohumanloop-0.0.6 → gohumanloop-0.0.7}/gohumanloop/adapters/__init__.py

@@ -1,5 +1,5 @@
 from .langgraph_adapter import (
-    LangGraphAdapter,
+    HumanloopAdapter,
     LangGraphHumanLoopCallback,
     default_langgraph_callback_factory,
     interrupt,
@@ -8,7 +8,7 @@ from .langgraph_adapter import (
 )

 __all__ = [
-    "LangGraphAdapter",
+    "HumanloopAdapter",
     "LangGraphHumanLoopCallback",
     "default_langgraph_callback_factory",
     "interrupt",
@@ -1,10 +1,10 @@
|
|
1
1
|
from typing import (
|
2
2
|
cast,
|
3
3
|
Dict,
|
4
|
+
List,
|
4
5
|
Any,
|
5
6
|
Optional,
|
6
7
|
Callable,
|
7
|
-
Awaitable,
|
8
8
|
TypeVar,
|
9
9
|
Union,
|
10
10
|
Type,
|
@@ -14,24 +14,21 @@ from typing import (
|
|
14
14
|
)
|
15
15
|
from types import TracebackType
|
16
16
|
from functools import wraps
|
17
|
-
import asyncio
|
18
17
|
import uuid
|
19
|
-
import time
|
20
18
|
from inspect import iscoroutinefunction
|
21
19
|
from contextlib import asynccontextmanager, contextmanager
|
22
20
|
import logging
|
23
21
|
|
24
22
|
from gohumanloop.utils import run_async_safely
|
25
23
|
from gohumanloop.core.interface import (
|
26
|
-
|
24
|
+
HumanLoopRequest,
|
27
25
|
HumanLoopResult,
|
28
26
|
HumanLoopStatus,
|
29
27
|
HumanLoopType,
|
30
28
|
HumanLoopCallback,
|
31
29
|
HumanLoopProvider,
|
30
|
+
HumanLoopManager,
|
32
31
|
)
|
33
|
-
from gohumanloop.core.manager import DefaultHumanLoopManager
|
34
|
-
from gohumanloop.providers.terminal_provider import TerminalProvider
|
35
32
|
|
36
33
|
logger = logging.getLogger(__name__)
|
37
34
|
|
@@ -40,37 +37,6 @@ T = TypeVar("T")
|
|
40
37
|
R = TypeVar("R", bound=Union[Any, None])
|
41
38
|
|
42
39
|
|
43
|
-
# Check LangGraph version
|
44
|
-
def _check_langgraph_version() -> bool:
|
45
|
-
"""Check LangGraph version to determine if interrupt feature is supported"""
|
46
|
-
try:
|
47
|
-
import importlib.metadata
|
48
|
-
|
49
|
-
version = importlib.metadata.version("langgraph")
|
50
|
-
version_parts = version.split(".")
|
51
|
-
major, minor, patch = (
|
52
|
-
int(version_parts[0]),
|
53
|
-
int(version_parts[1]),
|
54
|
-
int(version_parts[2]),
|
55
|
-
)
|
56
|
-
|
57
|
-
# Interrupt support starts from version 0.2.57
|
58
|
-
return major > 0 or (major == 0 and (minor > 2 or (minor == 2 and patch >= 57)))
|
59
|
-
except (importlib.metadata.PackageNotFoundError, ValueError, IndexError):
|
60
|
-
# If version cannot be determined, assume no support
|
61
|
-
return False
|
62
|
-
|
63
|
-
|
64
|
-
# Import corresponding features based on version
|
65
|
-
_SUPPORTS_INTERRUPT = _check_langgraph_version()
|
66
|
-
if _SUPPORTS_INTERRUPT:
|
67
|
-
try:
|
68
|
-
from langgraph.types import interrupt as _lg_interrupt
|
69
|
-
from langgraph.types import Command as _lg_Command
|
70
|
-
except ImportError:
|
71
|
-
_SUPPORTS_INTERRUPT = False
|
72
|
-
|
73
|
-
|
74
40
|
class HumanLoopWrapper:
|
75
41
|
def __init__(
|
76
42
|
self,
|
@@ -85,8 +51,8 @@ class HumanLoopWrapper:
|
|
85
51
|
return self.decorator(fn)
|
86
52
|
|
87
53
|
|
88
|
-
class
|
89
|
-
"""
|
54
|
+
class HumanloopAdapter:
|
55
|
+
"""Humanloop adapter for simplifying human-in-the-loop integration
|
90
56
|
|
91
57
|
Provides decorators for three scenarios:
|
92
58
|
- require_approval: Requires human approval
|
@@ -100,7 +66,7 @@ class LangGraphAdapter:
|
|
100
66
|
self.manager = manager
|
101
67
|
self.default_timeout = default_timeout
|
102
68
|
|
103
|
-
async def __aenter__(self) -> "
|
69
|
+
async def __aenter__(self) -> "HumanloopAdapter":
|
104
70
|
"""Implements async context manager protocol, automatically manages manager lifecycle"""
|
105
71
|
|
106
72
|
manager = cast(Any, self.manager)
|
@@ -122,7 +88,7 @@ class LangGraphAdapter:
|
|
122
88
|
|
123
89
|
return None
|
124
90
|
|
125
|
-
def __enter__(self) -> "
|
91
|
+
def __enter__(self) -> "HumanloopAdapter":
|
126
92
|
"""Implements sync context manager protocol, automatically manages manager lifecycle"""
|
127
93
|
|
128
94
|
manager = cast(Any, self.manager)
|
@@ -145,7 +111,7 @@ class LangGraphAdapter:
|
|
145
111
|
return None
|
146
112
|
|
147
113
|
@asynccontextmanager
|
148
|
-
async def asession(self) -> AsyncIterator["
|
114
|
+
async def asession(self) -> AsyncIterator["HumanloopAdapter"]:
|
149
115
|
"""Provides async context manager for managing session lifecycle
|
150
116
|
|
151
117
|
Example:
|
@@ -162,7 +128,7 @@ class LangGraphAdapter:
|
|
162
128
|
await manager.__aexit__(None, None, None)
|
163
129
|
|
164
130
|
@contextmanager
|
165
|
-
def session(self) -> Iterator["
|
131
|
+
def session(self) -> Iterator["HumanloopAdapter"]:
|
166
132
|
"""Provides a synchronous context manager for managing session lifecycle
|
167
133
|
|
168
134
|
Example:
|
@@ -735,243 +701,138 @@ class LangGraphAdapter:
         return sync_wrapper


-class LangGraphHumanLoopCallback(HumanLoopCallback):
-    """
-
-    def __init__(
-        self,
-        state: Any,
-        async_on_update: Optional[
-            Callable[[Any, HumanLoopProvider, HumanLoopResult], Awaitable[None]]
-        ] = None,
-        async_on_timeout: Optional[
-            Callable[[Any, HumanLoopProvider], Awaitable[None]]
-        ] = None,
-        async_on_error: Optional[
-            Callable[[Any, HumanLoopProvider, Exception], Awaitable[None]]
-        ] = None,
-    ) -> None:
-        self.state = state
-        self.async_on_update = async_on_update
-        self.async_on_timeout = async_on_timeout
-        self.async_on_error = async_on_error
-
-    async def async_on_humanloop_update(
-        self, provider: HumanLoopProvider, result: HumanLoopResult
-    ) -> None:
-        if self.async_on_update:
-            await self.async_on_update(self.state, provider, result)
-
-    async def async_on_humanloop_timeout(
-        self,
-        provider: HumanLoopProvider,
-    ) -> None:
-        if self.async_on_timeout:
-            await self.async_on_timeout(self.state, provider)
-
-    async def async_on_humanloop_error(
-        self, provider: HumanLoopProvider, error: Exception
-    ) -> None:
-        if self.async_on_error:
-            await self.async_on_error(self.state, provider, error)
-
-
-def default_langgraph_callback_factory(state: Any) -> LangGraphHumanLoopCallback:
-    """Default human-loop callback factory for LangGraph framework
-
-    This callback focuses on:
-    1. Logging human interaction events
-    2. Providing debug information
-    3. Collecting performance metrics
-
-    Note: This callback does not modify state to maintain clear state management
-
-    Args:
-        state: LangGraph state object, only used for log correlation
-
-    Returns:
-        Configured LangGraphHumanLoopCallback instance
-    """
-
-    async def async_on_update(
-        state: Any, provider: HumanLoopProvider, result: HumanLoopResult
-    ) -> None:
-        """Log human interaction update events"""
-        logger.info(f"Provider ID: {provider.name}")
-        logger.info(
-            f"Human interaction update "
-            f"status={result.status}, "
-            f"response={result.response}, "
-            f"responded_by={result.responded_by}, "
-            f"responded_at={result.responded_at}, "
-            f"feedback={result.feedback}"
-        )
-
-    async def async_on_timeout(state: Any, provider: HumanLoopProvider) -> None:
-        """Log human interaction timeout events"""
-
-        logger.info(f"Provider ID: {provider.name}")
-        from datetime import datetime
-
-        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        logger.warning(f"Human interaction timeout - Time: {current_time}")
-
-        # Alert logic can be added here, such as sending notifications
-
-    async def async_on_error(
-        state: Any, provider: HumanLoopProvider, error: Exception
-    ) -> None:
-        """Log human interaction error events"""
-
-        logger.info(f"Provider ID: {provider.name}")
-        from datetime import datetime
-
-        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        logger.error(f"Human interaction error - Time: {current_time} Error: {error}")
-
-    return LangGraphHumanLoopCallback(
-        state=state,
-        async_on_update=async_on_update,
-        async_on_timeout=async_on_timeout,
-        async_on_error=async_on_error,
-    )
-
-
-# Create HumanLoopManager instance
-manager = DefaultHumanLoopManager(
-    initial_providers=TerminalProvider(name="LGDefaultProvider")
-)
-
-# Create LangGraphAdapter instance
-default_adapter = LangGraphAdapter(manager, default_timeout=60)
-
-default_conversation_id = str(uuid.uuid4())
-
-_SKIP_NEXT_HUMANLOOP = False
-
-
-def interrupt(value: Any, lg_humanloop: LangGraphAdapter = default_adapter) -> Any:
-    """
-    Wraps LangGraph's interrupt functionality to pause graph execution and wait for human input
-
-    Raises RuntimeError if LangGraph version doesn't support interrupt
-
-    Args:
-        value: Any JSON-serializable value that will be shown to human user
-        lg_humanloop: LangGraphAdapter instance, defaults to global instance
+class AgentOpsHumanLoopCallback(HumanLoopCallback):
+    """AgentOps-specific human loop callback, compatible with TypedDict or Pydantic BaseModel State

-
-
+    This implementation integrates with AgentOps for monitoring and tracking human-in-the-loop interactions.
+    It records events, tracks metrics, and provides observability for human-agent interactions.
     """

-
+    import importlib.util

-    if not _SUPPORTS_INTERRUPT:
-        raise RuntimeError(
-            "LangGraph version too low, interrupt not supported. Please upgrade to version 0.2.57 or higher."
-            "You can use: pip install --upgrade langgraph>=0.2.57"
-        )
-
-    if not _SKIP_NEXT_HUMANLOOP:
-        # Get current event loop or create new one
-        try:
-            lg_humanloop.manager.request_humanloop(
-                task_id="lg_interrupt",
-                conversation_id=default_conversation_id,
-                loop_type=HumanLoopType.INFORMATION,
-                context={
-                    "message": f"{value}",
-                    "question": "The execution has been interrupted. Please review the above information and provide your input to continue.",
-                },
-                blocking=False,
-            )
-        except Exception as e:
-            logger.exception(f"Error in interrupt: {e}")
+    if importlib.util.find_spec("agentops") is not None:
+        from agentops.sdk import session, agent, operation, task  # type: ignore
     else:
-
-
-
-    # Return LangGraph's interrupt
-    return _lg_interrupt(value)
-
-
-def create_resume_command(lg_humanloop: LangGraphAdapter = default_adapter) -> Any:
-    """
-    Create a Command object to resume interrupted graph execution
-
-    Will raise RuntimeError if LangGraph version doesn't support Command
-
-    Args:
-        lg_humanloop: LangGraphAdapter instance, defaults to global instance
-
-    Returns:
-        Command object that can be used with graph.stream method
-    """
-
-    global _SKIP_NEXT_HUMANLOOP
-
-    if not _SUPPORTS_INTERRUPT:
-        raise RuntimeError(
-            "LangGraph version too low, Command feature not supported. Please upgrade to 0.2.57 or higher."
-            "You can use: pip install --upgrade langgraph>=0.2.57"
+        logger.debug(
+            "AgentOps package not installed. AgentOps features disabled. Please pip install agentops"
         )

-
-
-        poll_interval = 1.0 # Polling interval (seconds)
-        while True:
-            result = lg_humanloop.manager.check_conversation_status(
-                default_conversation_id
-            )
-            # If status is final state (not PENDING), return result
-            if result.status != HumanLoopStatus.PENDING:
-                return result.response
-            # Wait before polling again
-            time.sleep(poll_interval)
-
-    _SKIP_NEXT_HUMANLOOP = True
-
-    response = poll_for_result()
-    return _lg_Command(resume=response)
+    def __init__(self, session_tags: Optional[List[str]] = None) -> None:
+        """Initialize the AgentOps human loop callback.

+        Args:
+            session_tags: Optional tags for AgentOps session tracking
+        """
+        self.session_tags = session_tags or ["gohumanloop"]

-
-
-) -> Any:
-    """
-    Create an async version of Command object to resume interrupted graph execution
+        try:
+            import agentops  # type: ignore

-
+            agentops.init(tags=self.session_tags)
+        except Exception as e:
+            logger.warning(f"Failed to initialize AgentOps: {str(e)}")

-
-
+    @operation
+    async def async_on_humanloop_request(
+        self, provider: HumanLoopProvider, request: HumanLoopRequest
+    ) -> Any:
+        """Handle human loop start events."""
+        try:
+            # Create event data
+            event_data = {
+                "event_type": "gohumanloop_request",
+                "provider": provider.name,
+                "task_id": request.task_id,
+                "conversation_id": request.conversation_id,
+                "request_id": request.request_id,
+                "loop_type": request.loop_type.value,
+                "context": request.context,
+                "metadata": request.metadata,
+                "timeout": request.timeout,
+                "created_at": request.created_at,
+            }
+            return event_data
+        except (ImportError, Exception) as e:
+            logger.warning(f"Failed to record AgentOps event: {str(e)}")
+
+    @operation
+    async def async_on_humanloop_update(
+        self, provider: HumanLoopProvider, result: HumanLoopResult
+    ) -> Any:
+        """Handle human loop update events.

-
-
-
-
+        Args:
+            provider: The human loop provider instance
+            result: The human loop result containing status and response
+        """
+        try:
+            # Create event data
+            event_data = {
+                "event_type": "gohumanloop_update",
+                "provider": provider.name,
+                "conversation_id": result.conversation_id,
+                "request_id": result.request_id,
+                "loop_type": result.loop_type.value,
+                "status": result.status.value,
+                "response": result.response,
+                "feedback": result.feedback,
+                "responded_by": result.responded_by,
+                "responded_at": result.responded_at,
+                "error": result.error,
+            }
+
+            return event_data
+        except Exception as e:
+            logger.warning(f"Failed to record AgentOps event: {str(e)}")

-
-
-
-
-
+    @operation
+    async def async_on_humanloop_timeout(
+        self, provider: HumanLoopProvider, result: HumanLoopResult
+    ) -> Any:
+        """Handle human loop timeout events.

-
-
-
-
-
-
-
-
-
-
-
-
+        Args:
+            provider: The human loop provider instance
+        """
+        try:
+            # Create error event
+            error_data = {
+                "event_type": "gohumanloop_timeout",
+                "provider": provider.name,
+                "conversation_id": result.conversation_id,
+                "request_id": result.request_id,
+                "loop_type": result.loop_type.value,
+                "status": result.status.value,
+                "response": result.response,
+                "feedback": result.feedback,
+                "responded_by": result.responded_by,
+                "responded_at": result.responded_at,
+                "error": result.error,
+            }
+
+            return error_data
+        except Exception as e:
+            logger.warning(f"Failed to record AgentOps timeout event: {str(e)}")

-
+    @operation
+    async def async_on_humanloop_error(
+        self, provider: HumanLoopProvider, error: Exception
+    ) -> Any:
+        """Handle human loop error events.

-
-
-
+        Args:
+            provider: The human loop provider instance
+            error: The exception that occurred
+        """
+        try:
+            # Create error event
+            error_data = {
+                "event_type": "gohumanloop_error",
+                "provider": provider.name,
+                "error": str(error),
+            }
+
+            # Record the error event
+            return error_data
+        except Exception as e:
+            logger.warning(f"Failed to record AgentOps error event: {str(e)}")