gohumanloop 0.0.5__tar.gz → 0.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. gohumanloop-0.0.7/PKG-INFO +298 -0
  2. gohumanloop-0.0.7/README.md +273 -0
  3. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/__init__.py +6 -8
  4. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/adapters/__init__.py +6 -6
  5. gohumanloop-0.0.5/gohumanloop/adapters/langgraph_adapter.py → gohumanloop-0.0.7/gohumanloop/adapters/base_adapter.py +392 -390
  6. gohumanloop-0.0.7/gohumanloop/adapters/langgraph_adapter.py +344 -0
  7. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/cli/main.py +4 -1
  8. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/core/interface.py +194 -217
  9. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/core/manager.py +375 -266
  10. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/manager/ghl_manager.py +223 -185
  11. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/models/api_model.py +32 -7
  12. gohumanloop-0.0.7/gohumanloop/models/glh_model.py +27 -0
  13. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/api_provider.py +233 -189
  14. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/base.py +179 -172
  15. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/email_provider.py +386 -325
  16. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/ghl_provider.py +19 -17
  17. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/terminal_provider.py +111 -92
  18. gohumanloop-0.0.7/gohumanloop/utils/__init__.py +7 -0
  19. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/utils/context_formatter.py +20 -15
  20. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/utils/threadsafedict.py +64 -56
  21. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/utils/utils.py +28 -28
  22. gohumanloop-0.0.7/gohumanloop.egg-info/PKG-INFO +298 -0
  23. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop.egg-info/SOURCES.txt +1 -0
  24. gohumanloop-0.0.7/gohumanloop.egg-info/requires.txt +18 -0
  25. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/pyproject.toml +36 -3
  26. gohumanloop-0.0.5/PKG-INFO +0 -35
  27. gohumanloop-0.0.5/README.md +0 -19
  28. gohumanloop-0.0.5/gohumanloop/models/glh_model.py +0 -23
  29. gohumanloop-0.0.5/gohumanloop/utils/__init__.py +0 -1
  30. gohumanloop-0.0.5/gohumanloop.egg-info/PKG-INFO +0 -35
  31. gohumanloop-0.0.5/gohumanloop.egg-info/requires.txt +0 -6
  32. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/LICENSE +0 -0
  33. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/__main__.py +0 -0
  34. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/cli/__init__.py +0 -0
  35. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/core/__init__.py +0 -0
  36. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/manager/__init__.py +0 -0
  37. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/models/__init__.py +0 -0
  38. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop/providers/__init__.py +0 -0
  39. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop.egg-info/dependency_links.txt +0 -0
  40. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop.egg-info/entry_points.txt +0 -0
  41. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/gohumanloop.egg-info/top_level.txt +0 -0
  42. {gohumanloop-0.0.5 → gohumanloop-0.0.7}/setup.cfg +0 -0
@@ -0,0 +1,298 @@
1
+ Metadata-Version: 2.4
2
+ Name: gohumanloop
3
+ Version: 0.0.7
4
+ Summary: Perfecting AI workflows with human intelligence
5
+ Author-email: gohumanloop authors <baird0917@163.com>
6
+ Project-URL: repository, https://github.com/ptonlix/gohumanloop
7
+ Requires-Python: >=3.10
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: aiohttp>=3.11.16
11
+ Requires-Dist: click>=8.1.8
12
+ Requires-Dist: dotenv>=0.9.9
13
+ Requires-Dist: pydantic>=2.11.3
14
+ Requires-Dist: tomli>=2.2.1
15
+ Provides-Extra: email
16
+ Requires-Dist: imapclient>=3.0.1; extra == "email"
17
+ Provides-Extra: langgraph
18
+ Requires-Dist: langgraph>=0.3.30; extra == "langgraph"
19
+ Provides-Extra: apiservices
20
+ Requires-Dist: fastapi>=0.115.12; extra == "apiservices"
21
+ Requires-Dist: uvicorn>=0.34.2; extra == "apiservices"
22
+ Provides-Extra: agentops
23
+ Requires-Dist: agentops>=0.4.12; extra == "agentops"
24
+ Dynamic: license-file
25
+
26
+ <div align="center">
27
+
28
+ ![Wordmark Logo of GoHumanLoop](./docs/images/wordmark.png)
29
+ <b face="雅黑">Perfecting AI workflows with human intelligence</b>
30
+
31
+ </div>
32
+
33
+ **GoHumanLoop**: A Python library empowering AI agents to dynamically request human input (approval/feedback/conversation) at critical stages. Core features:
34
+
35
+ - `Human-in-the-loop control`: Lets AI agent systems pause and escalate decisions, enhancing safety and trust.
36
+ - `Multi-channel integration`: Supports Terminal, Email, API, and frameworks like LangGraph/CrewAI (soon).
37
+ - `Flexible workflows`: Combines automated reasoning with human oversight for reliable AI operations.
38
+
39
+ Ensures responsible AI deployment by bridging autonomous agents and human judgment.
40
+
41
+ <div align="center">
42
+ <img alt="Repo stars" src="https://img.shields.io/github/stars/ptonlix/gohumanloop"/>
43
+ <img alt=" Python" src="https://img.shields.io/badge/Python-3.10%2B-blue"/>
44
+ <img alt="license" src="https://img.shields.io/badge/license-MIT-green"/>
45
+
46
+ [简体中文](README-zh.md) | English
47
+
48
+ </div>
49
+
50
+ ## Table of contents
51
+
52
+ - [Getting Started](#getting-started)
53
+ - [Why GoHumanloop?](#why-gohumanloop)
54
+ - [Key Features](#key-features)
55
+ - [Examples](#examples)
56
+ - [Roadmap](#roadmap)
57
+ - [Contributing](#contributing)
58
+ - [License](#license)
59
+
60
+ ## 🎹 Getting Started
61
+
62
+ To get started, check out the following example or jump straight into one of the [Examples](./examples/):
63
+
64
+ - 🦜⛓️ [LangGraph](./examples/langgraph/)
65
+
66
+ ### Installation
67
+
68
+ **GoHumanLoop** currently supports `Python`.
69
+
70
+ ```shell
71
+ pip install gohumanloop
72
+ ```
73
+
74
+ ### Example
75
+
76
+ The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.
77
+
78
+ > 💡 By default, it uses `Terminal` as the `langgraph_adapter` for human interaction.
79
+
80
+ ```python
81
+ import os
82
+ from langchain.chat_models import init_chat_model
83
+ from typing import Annotated
84
+
85
+ from langchain_tavily import TavilySearch
86
+ from langchain_core.tools import tool
87
+ from typing_extensions import TypedDict
88
+
89
+ from langgraph.checkpoint.memory import MemorySaver
90
+ from langgraph.graph import StateGraph, START, END
91
+ from langgraph.graph.message import add_messages
92
+ from langgraph.prebuilt import ToolNode, tools_condition
93
+
94
+ # from langgraph.types import Command, interrupt # Don't use langgraph, use gohumanloop instead
95
+
96
+ from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command
97
+
98
+ # Please replace with your Deepseek API Key from https://platform.deepseek.com/usage
99
+ os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
100
+ # Please replace with your Tavily API Key from https://app.tavily.com/home
101
+ os.environ["TAVILY_API_KEY"] = "tvly-xxx"
102
+
103
+ llm = init_chat_model("deepseek:deepseek-chat")
104
+
105
+ class State(TypedDict):
106
+ messages: Annotated[list, add_messages]
107
+
108
+ graph_builder = StateGraph(State)
109
+
110
+ @tool
111
+ def human_assistance(query: str) -> str:
112
+ """Request assistance from a human."""
113
+ human_response = interrupt({"query": query})
114
+ return human_response
115
+
116
+ tool = TavilySearch(max_results=2)
117
+ tools = [tool, human_assistance]
118
+ llm_with_tools = llm.bind_tools(tools)
119
+
120
+ def chatbot(state: State):
121
+ message = llm_with_tools.invoke(state["messages"])
122
+ # Because we will be interrupting during tool execution,
123
+ # we disable parallel tool calling to avoid repeating any
124
+ # tool invocations when we resume.
125
+ assert len(message.tool_calls) <= 1
126
+ return {"messages": [message]}
127
+
128
+ graph_builder.add_node("chatbot", chatbot)
129
+
130
+ tool_node = ToolNode(tools=tools)
131
+ graph_builder.add_node("tools", tool_node)
132
+
133
+ graph_builder.add_conditional_edges(
134
+ "chatbot",
135
+ tools_condition,
136
+ )
137
+ graph_builder.add_edge("tools", "chatbot")
138
+ graph_builder.add_edge(START, "chatbot")
139
+
140
+ memory = MemorySaver()
141
+
142
+ graph = graph_builder.compile(checkpointer=memory)
143
+
144
+ user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?"
145
+ config = {"configurable": {"thread_id": "1"}}
146
+
147
+ events = graph.stream(
148
+ {"messages": [{"role": "user", "content": user_input}]},
149
+ config,
150
+ stream_mode="values",
151
+ )
152
+ for event in events:
153
+ if "messages" in event:
154
+ event["messages"][-1].pretty_print()
155
+
156
+ # LangGraph code:
157
+ # human_response = (
158
+ # "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
159
+ # "It's much more reliable and extensible than simple autonomous agents."
160
+ # )
161
+
162
+ # human_command = Command(resume={"data": human_response})
163
+
164
+ # GoHumanLoop code:
165
+ human_command = create_resume_command() # Use this command to resume the execution, instead of using the command above
166
+
167
+ events = graph.stream(human_command, config, stream_mode="values")
168
+ for event in events:
169
+ if "messages" in event:
170
+ event["messages"][-1].pretty_print()
171
+
172
+ ```
173
+
174
+ - Deployment & Test
175
+
176
+ Run the above code with the following steps:
177
+
178
+ ```shell
179
+ # 1.Initialize environment
180
+ uv init gohumanloop-example
181
+ cd gohumanloop-example
182
+ uv venv .venv --python=3.10
183
+
184
+ # 2.Copy the above code to main.py
185
+
186
+ # 3.Deploy and test
187
+ uv pip install langchain
188
+ uv pip install langchain_tavily
189
+ uv pip install langgraph
190
+ uv pip install langchain-deepseek
191
+ uv pip install gohumanloop
192
+
193
+ python main.py
194
+
195
+ ```
196
+
197
+ - Interaction Demo
198
+
199
+ ![终端展示](http://cdn.oyster-iot.cloud/202505232244870.png)
200
+
201
+ Perform `human-in-the-loop` interaction by entering:
202
+
203
+ > We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.
204
+
205
+ ![输出结果](http://cdn.oyster-iot.cloud/202505232248390.png)
206
+
207
+ 🚀🚀🚀 Completed successfully ~
208
+
209
+ ➡️ Check out more examples in the [Examples Directory](./examples/) and we look forward to your contributions!
210
+
211
+ ## 🎵 Why GoHumanloop?
212
+
213
+ ### Human-in-the-loop
214
+
215
+ <div align="center">
216
+ <img height=240 src="http://cdn.oyster-iot.cloud/202505210851404.png"><br>
217
+ <b face="雅黑">Even with state-of-the-art agentic reasoning and prompt routing, LLMs are not sufficiently reliable to be given access to high-stakes functions without human oversight</b>
218
+ </div>
219
+ <br>
220
+
221
+ `Human-in-the-loop` is an AI system design philosophy that integrates human judgment and supervision into AI decision-making processes. This concept is particularly important in AI Agent systems:
222
+
223
+ - **Safety Assurance**: Allows human intervention and review at critical decision points to prevent potentially harmful AI decisions
224
+ - **Quality Control**: Improves accuracy and reliability of AI outputs through expert feedback
225
+ - **Continuous Learning**: AI systems can learn and improve from human feedback, creating a virtuous cycle
226
+ - **Clear Accountability**: Maintains ultimate human control over important decisions with clear responsibility
227
+
228
+ In practice, Human-in-the-loop can take various forms - from simple decision confirmation to deep human-AI collaborative dialogues - ensuring optimal balance between autonomy and human oversight to maximize the potential of AI Agent systems.
229
+
230
+ #### Typical Use Cases
231
+
232
+ <div align="center">
233
+ <img height=120 src="http://cdn.oyster-iot.cloud/tool-call-review.png"><br>
234
+ <b face="雅黑"> A human can review and edit the output from the agent before proceeding. This is particularly critical in applications where the tool calls requested may be sensitive or require human oversight.</b>
235
+ </div>
236
+ <br>
237
+
238
+ - 🛠️ Tool Call Review: Humans can review, edit or approve tool call requests initiated by LLMs before execution
239
+ - ✅ Model Output Verification: Humans can review, edit or approve content generated by LLMs (text, decisions, etc.)
240
+ - 💡 Context Provision: Allows LLMs to actively request human input for clarification, additional details or multi-turn conversation context
241
+
242
+ ### Secure and Efficient Go➡Humanloop
243
+
244
+ `GoHumanloop` provides a set of tools deeply integrated within AI Agents to ensure constant `Human-in-the-loop` oversight. It deterministically ensures high-risk function calls must undergo human review while also enabling human expert feedback, thereby improving AI system reliability and safety while reducing risks from LLM hallucinations.
245
+
246
+ <div align="center">
247
+ <img height=420 src="http://cdn.oyster-iot.cloud/202505210943862.png"><br>
248
+ <b face="雅黑"> The Outer-Loop and Inversion of Control</b>
249
+ </div>
250
+ <br>
251
+
252
+ Through `GoHumanloop`'s encapsulation, you can implement secure and efficient `Human-in-the-loop` when requesting tools, Agent nodes, MCP services and other Agents.
253
+
254
+ ## 📚 Key Features
255
+
256
+ <div align="center">
257
+ <img height=360 src="http://cdn.oyster-iot.cloud/202505211030197.png"><br>
258
+ <b face="雅黑"> GoHumanLoop Architecture</b>
259
+ </div>
260
+ <br>
261
+
262
+ `GoHumanloop` offers the following core capabilities:
263
+
264
+ - **Approval:** Requests human review or approval when executing specific tool calls or Agent nodes
265
+ - **Information:** Obtains critical human input during task execution to reduce LLM hallucination risks
266
+ - **Conversation:** Enables multi-turn interactions with humans through dialogue to acquire richer contextual information
267
+ - **Framework-specific Integration:** Provides specialized integration methods for specific Agent frameworks, such as `interrupt` and `resume` for `LangGraph`
268
+
269
+ ## 📅 Roadmap
270
+
271
+ | Feature | Status |
272
+ | ----------------- | ---------- |
273
+ | Approval | ⚙️ Beta |
274
+ | Information | ⚙️ Beta |
275
+ | Conversation | ⚙️ Beta |
276
+ | Email Provider | ⚙️ Beta |
277
+ | Terminal Provider | ⚙️ Beta |
278
+ | API Provider | ⚙️ Beta |
279
+ | Default Manager | ⚙️ Beta |
280
+ | GLH Manager | 🗓️ Planned |
281
+ | Langchain Support | ⚙️ Beta |
282
+ | CrewAI Support | 🗓️ Planned |
283
+
284
+ - 💡 GLH Manager - GoHumanLoop Manager will integrate with the upcoming GoHumanLoop Hub platform to provide users with more flexible management options.
285
+
286
+ ## 🤝 Contributing
287
+
288
+ The GoHumanLoop SDK and documentation are open source. We welcome contributions in the form of issues, documentation and PRs. For more details, please see [CONTRIBUTING.md](./CONTRIBUTING.md)
289
+
290
+ ## 📱 Contact
291
+
292
+ <img height=300 src="http://cdn.oyster-iot.cloud/202505231802103.png"/>
293
+
294
+ 🎉 If you're interested in this project, feel free to scan the QR code to contact the author.
295
+
296
+ ## 🌟 Star History
297
+
298
+ [![Star History Chart](https://api.star-history.com/svg?repos=gohumanloop/gohumanloop&type=Date)](https://www.star-history.com/#gohumanloop/gohumanloop&Date)
@@ -0,0 +1,273 @@
1
+ <div align="center">
2
+
3
+ ![Wordmark Logo of GoHumanLoop](./docs/images/wordmark.png)
4
+ <b face="雅黑">Perfecting AI workflows with human intelligence</b>
5
+
6
+ </div>
7
+
8
+ **GoHumanLoop**: A Python library empowering AI agents to dynamically request human input (approval/feedback/conversation) at critical stages. Core features:
9
+
10
+ - `Human-in-the-loop control`: Lets AI agent systems pause and escalate decisions, enhancing safety and trust.
11
+ - `Multi-channel integration`: Supports Terminal, Email, API, and frameworks like LangGraph/CrewAI (soon).
12
+ - `Flexible workflows`: Combines automated reasoning with human oversight for reliable AI operations.
13
+
14
+ Ensures responsible AI deployment by bridging autonomous agents and human judgment.
15
+
16
+ <div align="center">
17
+ <img alt="Repo stars" src="https://img.shields.io/github/stars/ptonlix/gohumanloop"/>
18
+ <img alt=" Python" src="https://img.shields.io/badge/Python-3.10%2B-blue"/>
19
+ <img alt="license" src="https://img.shields.io/badge/license-MIT-green"/>
20
+
21
+ [简体中文](README-zh.md) | English
22
+
23
+ </div>
24
+
25
+ ## Table of contents
26
+
27
+ - [Getting Started](#getting-started)
28
+ - [Why GoHumanloop?](#why-gohumanloop)
29
+ - [Key Features](#key-features)
30
+ - [Examples](#examples)
31
+ - [Roadmap](#roadmap)
32
+ - [Contributing](#contributing)
33
+ - [License](#license)
34
+
35
+ ## 🎹 Getting Started
36
+
37
+ To get started, check out the following example or jump straight into one of the [Examples](./examples/):
38
+
39
+ - 🦜⛓️ [LangGraph](./examples/langgraph/)
40
+
41
+ ### Installation
42
+
43
+ **GoHumanLoop** currently supports `Python`.
44
+
45
+ ```shell
46
+ pip install gohumanloop
47
+ ```
48
+
49
+ ### Example
50
+
51
+ The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.
52
+
53
+ > 💡 By default, it uses `Terminal` as the `langgraph_adapter` for human interaction.
54
+
55
+ ```python
56
+ import os
57
+ from langchain.chat_models import init_chat_model
58
+ from typing import Annotated
59
+
60
+ from langchain_tavily import TavilySearch
61
+ from langchain_core.tools import tool
62
+ from typing_extensions import TypedDict
63
+
64
+ from langgraph.checkpoint.memory import MemorySaver
65
+ from langgraph.graph import StateGraph, START, END
66
+ from langgraph.graph.message import add_messages
67
+ from langgraph.prebuilt import ToolNode, tools_condition
68
+
69
+ # from langgraph.types import Command, interrupt # Don't use langgraph, use gohumanloop instead
70
+
71
+ from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command
72
+
73
+ # Please replace with your Deepseek API Key from https://platform.deepseek.com/usage
74
+ os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
75
+ # Please replace with your Tavily API Key from https://app.tavily.com/home
76
+ os.environ["TAVILY_API_KEY"] = "tvly-xxx"
77
+
78
+ llm = init_chat_model("deepseek:deepseek-chat")
79
+
80
+ class State(TypedDict):
81
+ messages: Annotated[list, add_messages]
82
+
83
+ graph_builder = StateGraph(State)
84
+
85
+ @tool
86
+ def human_assistance(query: str) -> str:
87
+ """Request assistance from a human."""
88
+ human_response = interrupt({"query": query})
89
+ return human_response
90
+
91
+ tool = TavilySearch(max_results=2)
92
+ tools = [tool, human_assistance]
93
+ llm_with_tools = llm.bind_tools(tools)
94
+
95
+ def chatbot(state: State):
96
+ message = llm_with_tools.invoke(state["messages"])
97
+ # Because we will be interrupting during tool execution,
98
+ # we disable parallel tool calling to avoid repeating any
99
+ # tool invocations when we resume.
100
+ assert len(message.tool_calls) <= 1
101
+ return {"messages": [message]}
102
+
103
+ graph_builder.add_node("chatbot", chatbot)
104
+
105
+ tool_node = ToolNode(tools=tools)
106
+ graph_builder.add_node("tools", tool_node)
107
+
108
+ graph_builder.add_conditional_edges(
109
+ "chatbot",
110
+ tools_condition,
111
+ )
112
+ graph_builder.add_edge("tools", "chatbot")
113
+ graph_builder.add_edge(START, "chatbot")
114
+
115
+ memory = MemorySaver()
116
+
117
+ graph = graph_builder.compile(checkpointer=memory)
118
+
119
+ user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?"
120
+ config = {"configurable": {"thread_id": "1"}}
121
+
122
+ events = graph.stream(
123
+ {"messages": [{"role": "user", "content": user_input}]},
124
+ config,
125
+ stream_mode="values",
126
+ )
127
+ for event in events:
128
+ if "messages" in event:
129
+ event["messages"][-1].pretty_print()
130
+
131
+ # LangGraph code:
132
+ # human_response = (
133
+ # "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
134
+ # "It's much more reliable and extensible than simple autonomous agents."
135
+ # )
136
+
137
+ # human_command = Command(resume={"data": human_response})
138
+
139
+ # GoHumanLoop code:
140
+ human_command = create_resume_command() # Use this command to resume the execution, instead of using the command above
141
+
142
+ events = graph.stream(human_command, config, stream_mode="values")
143
+ for event in events:
144
+ if "messages" in event:
145
+ event["messages"][-1].pretty_print()
146
+
147
+ ```
148
+
149
+ - Deployment & Test
150
+
151
+ Run the above code with the following steps:
152
+
153
+ ```shell
154
+ # 1.Initialize environment
155
+ uv init gohumanloop-example
156
+ cd gohumanloop-example
157
+ uv venv .venv --python=3.10
158
+
159
+ # 2.Copy the above code to main.py
160
+
161
+ # 3.Deploy and test
162
+ uv pip install langchain
163
+ uv pip install langchain_tavily
164
+ uv pip install langgraph
165
+ uv pip install langchain-deepseek
166
+ uv pip install gohumanloop
167
+
168
+ python main.py
169
+
170
+ ```
171
+
172
+ - Interaction Demo
173
+
174
+ ![终端展示](http://cdn.oyster-iot.cloud/202505232244870.png)
175
+
176
+ Perform `human-in-the-loop` interaction by entering:
177
+
178
+ > We, the experts are here to help! We'd recommend you check out LangGraph to build your agent. It's much more reliable and extensible than simple autonomous agents.
179
+
180
+ ![输出结果](http://cdn.oyster-iot.cloud/202505232248390.png)
181
+
182
+ 🚀🚀🚀 Completed successfully ~
183
+
184
+ ➡️ Check out more examples in the [Examples Directory](./examples/) and we look forward to your contributions!
185
+
186
+ ## 🎵 Why GoHumanloop?
187
+
188
+ ### Human-in-the-loop
189
+
190
+ <div align="center">
191
+ <img height=240 src="http://cdn.oyster-iot.cloud/202505210851404.png"><br>
192
+ <b face="雅黑">Even with state-of-the-art agentic reasoning and prompt routing, LLMs are not sufficiently reliable to be given access to high-stakes functions without human oversight</b>
193
+ </div>
194
+ <br>
195
+
196
+ `Human-in-the-loop` is an AI system design philosophy that integrates human judgment and supervision into AI decision-making processes. This concept is particularly important in AI Agent systems:
197
+
198
+ - **Safety Assurance**: Allows human intervention and review at critical decision points to prevent potentially harmful AI decisions
199
+ - **Quality Control**: Improves accuracy and reliability of AI outputs through expert feedback
200
+ - **Continuous Learning**: AI systems can learn and improve from human feedback, creating a virtuous cycle
201
+ - **Clear Accountability**: Maintains ultimate human control over important decisions with clear responsibility
202
+
203
+ In practice, Human-in-the-loop can take various forms - from simple decision confirmation to deep human-AI collaborative dialogues - ensuring optimal balance between autonomy and human oversight to maximize the potential of AI Agent systems.
204
+
205
+ #### Typical Use Cases
206
+
207
+ <div align="center">
208
+ <img height=120 src="http://cdn.oyster-iot.cloud/tool-call-review.png"><br>
209
+ <b face="雅黑"> A human can review and edit the output from the agent before proceeding. This is particularly critical in applications where the tool calls requested may be sensitive or require human oversight.</b>
210
+ </div>
211
+ <br>
212
+
213
+ - 🛠️ Tool Call Review: Humans can review, edit or approve tool call requests initiated by LLMs before execution
214
+ - ✅ Model Output Verification: Humans can review, edit or approve content generated by LLMs (text, decisions, etc.)
215
+ - 💡 Context Provision: Allows LLMs to actively request human input for clarification, additional details or multi-turn conversation context
216
+
217
+ ### Secure and Efficient Go➡Humanloop
218
+
219
+ `GoHumanloop` provides a set of tools deeply integrated within AI Agents to ensure constant `Human-in-the-loop` oversight. It deterministically ensures high-risk function calls must undergo human review while also enabling human expert feedback, thereby improving AI system reliability and safety while reducing risks from LLM hallucinations.
220
+
221
+ <div align="center">
222
+ <img height=420 src="http://cdn.oyster-iot.cloud/202505210943862.png"><br>
223
+ <b face="雅黑"> The Outer-Loop and Inversion of Control</b>
224
+ </div>
225
+ <br>
226
+
227
+ Through `GoHumanloop`'s encapsulation, you can implement secure and efficient `Human-in-the-loop` when requesting tools, Agent nodes, MCP services and other Agents.
228
+
229
+ ## 📚 Key Features
230
+
231
+ <div align="center">
232
+ <img height=360 src="http://cdn.oyster-iot.cloud/202505211030197.png"><br>
233
+ <b face="雅黑"> GoHumanLoop Architecture</b>
234
+ </div>
235
+ <br>
236
+
237
+ `GoHumanloop` offers the following core capabilities:
238
+
239
+ - **Approval:** Requests human review or approval when executing specific tool calls or Agent nodes
240
+ - **Information:** Obtains critical human input during task execution to reduce LLM hallucination risks
241
+ - **Conversation:** Enables multi-turn interactions with humans through dialogue to acquire richer contextual information
242
+ - **Framework-specific Integration:** Provides specialized integration methods for specific Agent frameworks, such as `interrupt` and `resume` for `LangGraph`
243
+
244
+ ## 📅 Roadmap
245
+
246
+ | Feature | Status |
247
+ | ----------------- | ---------- |
248
+ | Approval | ⚙️ Beta |
249
+ | Information | ⚙️ Beta |
250
+ | Conversation | ⚙️ Beta |
251
+ | Email Provider | ⚙️ Beta |
252
+ | Terminal Provider | ⚙️ Beta |
253
+ | API Provider | ⚙️ Beta |
254
+ | Default Manager | ⚙️ Beta |
255
+ | GLH Manager | 🗓️ Planned |
256
+ | Langchain Support | ⚙️ Beta |
257
+ | CrewAI Support | 🗓️ Planned |
258
+
259
+ - 💡 GLH Manager - GoHumanLoop Manager will integrate with the upcoming GoHumanLoop Hub platform to provide users with more flexible management options.
260
+
261
+ ## 🤝 Contributing
262
+
263
+ The GoHumanLoop SDK and documentation are open source. We welcome contributions in the form of issues, documentation and PRs. For more details, please see [CONTRIBUTING.md](./CONTRIBUTING.md)
264
+
265
+ ## 📱 Contact
266
+
267
+ <img height=300 src="http://cdn.oyster-iot.cloud/202505231802103.png"/>
268
+
269
+ 🎉 If you're interested in this project, feel free to scan the QR code to contact the author.
270
+
271
+ ## 🌟 Star History
272
+
273
+ [![Star History Chart](https://api.star-history.com/svg?repos=gohumanloop/gohumanloop&type=Date)](https://www.star-history.com/#gohumanloop/gohumanloop&Date)
@@ -19,7 +19,8 @@ from gohumanloop.utils import run_async_safely, get_secret_from_env
19
19
 
20
20
  # Conditionally import EmailProvider
21
21
  try:
22
- from gohumanloop.providers.email_provider import EmailProvider
22
+ from gohumanloop.providers.email_provider import EmailProvider # noqa: F401
23
+
23
24
  _has_email = True
24
25
  except ImportError:
25
26
  _has_email = False
@@ -27,15 +28,16 @@ except ImportError:
27
28
  # Dynamically get version number
28
29
  try:
29
30
  from importlib.metadata import version, PackageNotFoundError
31
+
30
32
  try:
31
33
  __version__ = version("gohumanloop")
32
34
  except PackageNotFoundError:
33
35
  import os
34
36
  import tomli
35
-
37
+
36
38
  root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
37
39
  pyproject_path = os.path.join(root_dir, "pyproject.toml")
38
-
40
+
39
41
  with open(pyproject_path, "rb") as f:
40
42
  pyproject_data = tomli.load(f)
41
43
  __version__ = pyproject_data["project"]["version"]
@@ -50,24 +52,20 @@ __all__ = [
50
52
  "HumanLoopResult",
51
53
  "HumanLoopStatus",
52
54
  "HumanLoopType",
53
-
54
55
  # Manager Implementations
55
56
  "DefaultHumanLoopManager",
56
57
  "GoHumanLoopManager",
57
-
58
58
  # Provider Implementations
59
59
  "BaseProvider",
60
60
  "APIProvider",
61
61
  "GoHumanLoopProvider",
62
62
  "TerminalProvider",
63
-
64
63
  # Utility Functions
65
64
  "run_async_safely",
66
65
  "get_secret_from_env",
67
-
68
66
  # Version Information
69
67
  "__version__",
70
68
  ]
71
69
 
72
70
  if _has_email:
73
- __all__.append("EmailProvider")
71
+ __all__.append("EmailProvider")
@@ -1,17 +1,17 @@
1
1
  from .langgraph_adapter import (
2
- LangGraphAdapter,
2
+ HumanloopAdapter,
3
3
  LangGraphHumanLoopCallback,
4
4
  default_langgraph_callback_factory,
5
5
  interrupt,
6
6
  create_resume_command,
7
- acreate_resume_command
8
- )
7
+ acreate_resume_command,
8
+ )
9
9
 
10
10
  __all__ = [
11
- "LangGraphAdapter",
11
+ "HumanloopAdapter",
12
12
  "LangGraphHumanLoopCallback",
13
13
  "default_langgraph_callback_factory",
14
14
  "interrupt",
15
15
  "create_resume_command",
16
- "acreate_resume_command"
17
- ]
16
+ "acreate_resume_command",
17
+ ]