gohumanloop 0.0.4__tar.gz → 0.0.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. gohumanloop-0.0.6/PKG-INFO +259 -0
  2. gohumanloop-0.0.6/README.md +236 -0
  3. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/__init__.py +15 -9
  4. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/adapters/__init__.py +4 -4
  5. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/adapters/langgraph_adapter.py +365 -220
  6. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/cli/main.py +4 -1
  7. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/core/interface.py +181 -215
  8. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/core/manager.py +341 -361
  9. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/manager/ghl_manager.py +223 -185
  10. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/models/api_model.py +32 -7
  11. gohumanloop-0.0.6/gohumanloop/models/glh_model.py +27 -0
  12. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/api_provider.py +233 -189
  13. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/base.py +179 -172
  14. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/email_provider.py +386 -325
  15. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/ghl_provider.py +19 -17
  16. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/terminal_provider.py +111 -92
  17. gohumanloop-0.0.6/gohumanloop/utils/__init__.py +7 -0
  18. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/utils/context_formatter.py +20 -15
  19. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/utils/threadsafedict.py +64 -56
  20. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/utils/utils.py +28 -28
  21. gohumanloop-0.0.6/gohumanloop.egg-info/PKG-INFO +259 -0
  22. gohumanloop-0.0.6/gohumanloop.egg-info/requires.txt +15 -0
  23. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/pyproject.toml +33 -3
  24. gohumanloop-0.0.4/PKG-INFO +0 -35
  25. gohumanloop-0.0.4/README.md +0 -19
  26. gohumanloop-0.0.4/gohumanloop/models/glh_model.py +0 -23
  27. gohumanloop-0.0.4/gohumanloop/utils/__init__.py +0 -1
  28. gohumanloop-0.0.4/gohumanloop.egg-info/PKG-INFO +0 -35
  29. gohumanloop-0.0.4/gohumanloop.egg-info/requires.txt +0 -6
  30. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/LICENSE +0 -0
  31. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/__main__.py +0 -0
  32. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/cli/__init__.py +0 -0
  33. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/core/__init__.py +0 -0
  34. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/manager/__init__.py +0 -0
  35. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/models/__init__.py +0 -0
  36. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop/providers/__init__.py +0 -0
  37. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop.egg-info/SOURCES.txt +0 -0
  38. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop.egg-info/dependency_links.txt +0 -0
  39. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop.egg-info/entry_points.txt +0 -0
  40. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/gohumanloop.egg-info/top_level.txt +0 -0
  41. {gohumanloop-0.0.4 → gohumanloop-0.0.6}/setup.cfg +0 -0
@@ -0,0 +1,259 @@
1
+ Metadata-Version: 2.4
2
+ Name: gohumanloop
3
+ Version: 0.0.6
4
+ Summary: Perfecting AI workflows with human intelligence
5
+ Author-email: gohumanloop authors <baird0917@163.com>
6
+ Project-URL: repository, https://github.com/ptonlix/gohumanloop
7
+ Requires-Python: >=3.10
8
+ Description-Content-Type: text/markdown
9
+ License-File: LICENSE
10
+ Requires-Dist: aiohttp>=3.11.16
11
+ Requires-Dist: click>=8.1.8
12
+ Requires-Dist: dotenv>=0.9.9
13
+ Requires-Dist: pydantic>=2.11.3
14
+ Requires-Dist: tomli>=2.2.1
15
+ Provides-Extra: email
16
+ Requires-Dist: imapclient>=3.0.1; extra == "email"
17
+ Provides-Extra: langgraph
18
+ Requires-Dist: langgraph>=0.3.30; extra == "langgraph"
19
+ Provides-Extra: apiservices
20
+ Requires-Dist: fastapi>=0.115.12; extra == "apiservices"
21
+ Requires-Dist: uvicorn>=0.34.2; extra == "apiservices"
22
+ Dynamic: license-file
23
+
24
+ <div align="center">
25
+
26
+ ![Wordmark Logo of GoHumanLoop](./docs/images/wordmark.png)
27
+ <b face="雅黑">Perfecting AI workflows with human intelligence</b>
28
+
29
+ </div>
30
+
31
+ **GoHumanLoop**: A Python library empowering AI agents to dynamically request human input (approval/feedback/conversation) at critical stages. Core features:
32
+
33
+ - `Human-in-the-loop control`: Lets AI agent systems pause and escalate decisions, enhancing safety and trust.
34
+ - `Multi-channel integration`: Supports Terminal, Email, API, and frameworks like LangGraph/CrewAI (soon).
35
+ - `Flexible workflows`: Combines automated reasoning with human oversight for reliable AI operations.
36
+
37
+ Ensures responsible AI deployment by bridging autonomous agents and human judgment.
38
+
39
+ <div align="center">
40
+ <img alt="Repo stars" src="https://img.shields.io/github/stars/ptonlix/gohumanloop"/>
41
+ <img alt=" Python" src="https://img.shields.io/badge/Python-3.10%2B-blue"/>
42
+ <img alt="license" src="https://img.shields.io/badge/license-MIT-green"/>
43
+
44
+ [简体中文](README-zh.md) | English
45
+
46
+ </div>
47
+
48
+ ## Table of contents
49
+
50
+ - [Getting Started](#getting-started)
51
+ - [Why GoHumanloop?](#-why-gohumanloop)
52
+ - [Key Features](#key-features)
53
+ - [Examples](#examples)
54
+ - [Roadmap](#roadmap)
55
+ - [Contributing](#contributing)
56
+ - [License](#license)
57
+
58
+ ## 🎹 Getting Started
59
+
60
+ To get started, check out the following example or jump straight into one of the [Examples](./examples/):
61
+
62
+ - 🦜⛓️ [LangGraph](./examples/langgraph/)
63
+
64
+ ### Example
65
+
66
+ **GoHumanLoop** currently supports `Python`.
67
+
68
+ - Installation
69
+
70
+ ```shell
71
+ pip install gohumanloop
72
+ ```
73
+
74
+ - Example
75
+
76
+ The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.
77
+
78
+ > 💡 By default, it uses `Terminal` as the `langgraph_adapter` for human interaction.
79
+
80
+ ```python
81
+ import os
82
+ from langchain.chat_models import init_chat_model
83
+ from typing import Annotated
84
+
85
+ from langchain_tavily import TavilySearch
86
+ from langchain_core.tools import tool
87
+ from typing_extensions import TypedDict
88
+
89
+ from langgraph.checkpoint.memory import MemorySaver
90
+ from langgraph.graph import StateGraph, START, END
91
+ from langgraph.graph.message import add_messages
92
+ from langgraph.prebuilt import ToolNode, tools_condition
93
+
94
+ # from langgraph.types import Command, interrupt # Don't use langgraph, use gohumanloop instead
95
+
96
+ from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command
97
+
98
+ os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
99
+ os.environ["TAVILY_API_KEY"] = "tvly-xxx"
100
+
101
+ llm = init_chat_model("deepseek:deepseek-chat")
102
+
103
+ class State(TypedDict):
104
+ messages: Annotated[list, add_messages]
105
+
106
+ graph_builder = StateGraph(State)
107
+
108
+ @tool
109
+ def human_assistance(query: str) -> str:
110
+ """Request assistance from a human."""
111
+ human_response = interrupt({"query": query})
112
+ return human_response
113
+
114
+ tool = TavilySearch(max_results=2)
115
+ tools = [tool, human_assistance]
116
+ llm_with_tools = llm.bind_tools(tools)
117
+
118
+ def chatbot(state: State):
119
+ message = llm_with_tools.invoke(state["messages"])
120
+ # Because we will be interrupting during tool execution,
121
+ # we disable parallel tool calling to avoid repeating any
122
+ # tool invocations when we resume.
123
+ assert len(message.tool_calls) <= 1
124
+ return {"messages": [message]}
125
+
126
+ graph_builder.add_node("chatbot", chatbot)
127
+
128
+ tool_node = ToolNode(tools=tools)
129
+ graph_builder.add_node("tools", tool_node)
130
+
131
+ graph_builder.add_conditional_edges(
132
+ "chatbot",
133
+ tools_condition,
134
+ )
135
+ graph_builder.add_edge("tools", "chatbot")
136
+ graph_builder.add_edge(START, "chatbot")
137
+
138
+ memory = MemorySaver()
139
+
140
+ graph = graph_builder.compile(checkpointer=memory)
141
+
142
+ user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?"
143
+ config = {"configurable": {"thread_id": "1"}}
144
+
145
+ events = graph.stream(
146
+ {"messages": [{"role": "user", "content": user_input}]},
147
+ config,
148
+ stream_mode="values",
149
+ )
150
+ for event in events:
151
+ if "messages" in event:
152
+ event["messages"][-1].pretty_print()
153
+
154
+ # LangGraph code:
155
+ # human_response = (
156
+ # "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
157
+ # "It's much more reliable and extensible than simple autonomous agents."
158
+ # )
159
+
160
+ # human_command = Command(resume={"data": human_response})
161
+
162
+ # GoHumanLoop code:
163
+ human_command = create_resume_command() # Use this command to resume the execution, instead of using the command above
164
+
165
+ events = graph.stream(human_command, config, stream_mode="values")
166
+ for event in events:
167
+ if "messages" in event:
168
+ event["messages"][-1].pretty_print()
169
+
170
+ ```
171
+
172
+ ## 🎵 Why GoHumanloop?
173
+
174
+ ### Human-in-the-loop
175
+
176
+ <div align="center">
177
+ <img height=240 src="http://cdn.oyster-iot.cloud/202505210851404.png"><br>
178
+ <b face="雅黑">Even with state-of-the-art agentic reasoning and prompt routing, LLMs are not sufficiently reliable to be given access to high-stakes functions without human oversight</b>
179
+ </div>
180
+ <br>
181
+
182
+ `Human-in-the-loop` is an AI system design philosophy that integrates human judgment and supervision into AI decision-making processes. This concept is particularly important in AI Agent systems:
183
+
184
+ - **Safety Assurance**: Allows human intervention and review at critical decision points to prevent potentially harmful AI decisions
185
+ - **Quality Control**: Improves accuracy and reliability of AI outputs through expert feedback
186
+ - **Continuous Learning**: AI systems can learn and improve from human feedback, creating a virtuous cycle
187
+ - **Clear Accountability**: Maintains ultimate human control over important decisions with clear responsibility
188
+
189
+ In practice, Human-in-the-loop can take various forms - from simple decision confirmation to deep human-AI collaborative dialogues - ensuring optimal balance between autonomy and human oversight to maximize the potential of AI Agent systems.
190
+
191
+ #### Typical Use Cases
192
+
193
+ <div align="center">
194
+ <img height=120 src="http://cdn.oyster-iot.cloud/tool-call-review.png"><br>
195
+ <b face="雅黑"> A human can review and edit the output from the agent before proceeding. This is particularly critical in applications where the tool calls requested may be sensitive or require human oversight.</b>
196
+ </div>
197
+ <br>
198
+
199
+ - 🛠️ Tool Call Review: Humans can review, edit or approve tool call requests initiated by LLMs before execution
200
+ - ✅ Model Output Verification: Humans can review, edit or approve content generated by LLMs (text, decisions, etc.)
201
+ - 💡 Context Provision: Allows LLMs to actively request human input for clarification, additional details or multi-turn conversation context
202
+
203
+ ### Secure and Efficient Go➡Humanloop
204
+
205
+ `GoHumanloop` provides a set of tools deeply integrated within AI Agents to ensure constant `Human-in-the-loop` oversight. It deterministically ensures high-risk function calls must undergo human review while also enabling human expert feedback, thereby improving AI system reliability and safety while reducing risks from LLM hallucinations.
206
+
207
+ <div align="center">
208
+ <img height=420 src="http://cdn.oyster-iot.cloud/202505210943862.png"><br>
209
+ <b face="雅黑"> The Outer-Loop and Inversion of Control</b>
210
+ </div>
211
+ <br>
212
+
213
+ Through `GoHumanloop`'s encapsulation, you can implement secure and efficient `Human-in-the-loop` when requesting tools, Agent nodes, MCP services and other Agents.
214
+
215
+ ## 📚 Key Features
216
+
217
+ <div align="center">
218
+ <img height=360 src="http://cdn.oyster-iot.cloud/202505211030197.png"><br>
219
+ <b face="雅黑"> GoHumanLoop Architecture</b>
220
+ </div>
221
+ <br>
222
+
223
+ `GoHumanloop` offers the following core capabilities:
224
+
225
+ - **Approval:** Requests human review or approval when executing specific tool calls or Agent nodes
226
+ - **Information:** Obtains critical human input during task execution to reduce LLM hallucination risks
227
+ - **Conversation:** Enables multi-turn interactions with humans through dialogue to acquire richer contextual information
228
+ - **Framework-specific Integration:** Provides specialized integration methods for specific Agent frameworks, such as `interrupt` and `resume` for `LangGraph`
229
+
230
+ ## 📅 Roadmap
231
+
232
+ | Feature | Status |
233
+ | ----------------- | ---------- |
234
+ | Approval | ⚙️ Beta |
235
+ | Information | ⚙️ Beta |
236
+ | Conversation | ⚙️ Beta |
237
+ | Email Provider | ⚙️ Beta |
238
+ | Terminal Provider | ⚙️ Beta |
239
+ | API Provider | ⚙️ Beta |
240
+ | Default Manager | ⚙️ Beta |
241
+ | GLH Manager | 🗓️ Planned |
242
+ | Langchain Support | ⚙️ Beta |
243
+ | CrewAI Support | 🗓️ Planned |
244
+
245
+ - 💡 GLH Manager - GoHumanLoop Manager will integrate with the upcoming GoHumanLoop Hub platform to provide users with more flexible management options.
246
+
247
+ ## 🤝 Contributing
248
+
249
+ The GoHumanLoop SDK and documentation are open source. We welcome contributions in the form of issues, documentation and PRs. For more details, please see [CONTRIBUTING.md](./CONTRIBUTING.md)
250
+
251
+ ## 📱 Contact
252
+
253
+ <img height=300 src="http://cdn.oyster-iot.cloud/202505231802103.png"/>
254
+
255
+ 🎉 If you're interested in this project, feel free to scan the QR code to contact the author.
256
+
257
+ ## 🌟 Star History
258
+
259
+ [![Star History Chart](https://api.star-history.com/svg?repos=gohumanloop/gohumanloop&type=Date)](https://www.star-history.com/#gohumanloop/gohumanloop&Date)
@@ -0,0 +1,236 @@
1
+ <div align="center">
2
+
3
+ ![Wordmark Logo of GoHumanLoop](./docs/images/wordmark.png)
4
+ <b face="雅黑">Perfecting AI workflows with human intelligence</b>
5
+
6
+ </div>
7
+
8
+ **GoHumanLoop**: A Python library empowering AI agents to dynamically request human input (approval/feedback/conversation) at critical stages. Core features:
9
+
10
+ - `Human-in-the-loop control`: Lets AI agent systems pause and escalate decisions, enhancing safety and trust.
11
+ - `Multi-channel integration`: Supports Terminal, Email, API, and frameworks like LangGraph/CrewAI (soon).
12
+ - `Flexible workflows`: Combines automated reasoning with human oversight for reliable AI operations.
13
+
14
+ Ensures responsible AI deployment by bridging autonomous agents and human judgment.
15
+
16
+ <div align="center">
17
+ <img alt="Repo stars" src="https://img.shields.io/github/stars/ptonlix/gohumanloop"/>
18
+ <img alt=" Python" src="https://img.shields.io/badge/Python-3.10%2B-blue"/>
19
+ <img alt="license" src="https://img.shields.io/badge/license-MIT-green"/>
20
+
21
+ [简体中文](README-zh.md) | English
22
+
23
+ </div>
24
+
25
+ ## Table of contents
26
+
27
+ - [Getting Started](#getting-started)
28
+ - [Why GoHumanloop?](#-why-gohumanloop)
29
+ - [Key Features](#key-features)
30
+ - [Examples](#examples)
31
+ - [Roadmap](#roadmap)
32
+ - [Contributing](#contributing)
33
+ - [License](#license)
34
+
35
+ ## 🎹 Getting Started
36
+
37
+ To get started, check out the following example or jump straight into one of the [Examples](./examples/):
38
+
39
+ - 🦜⛓️ [LangGraph](./examples/langgraph/)
40
+
41
+ ### Example
42
+
43
+ **GoHumanLoop** currently supports `Python`.
44
+
45
+ - Installation
46
+
47
+ ```shell
48
+ pip install gohumanloop
49
+ ```
50
+
51
+ - Example
52
+
53
+ The following example enhances [the official LangGraph example](https://langchain-ai.github.io/langgraph/tutorials/get-started/4-human-in-the-loop/#5-resume-execution) with `human-in-the-loop` functionality.
54
+
55
+ > 💡 By default, it uses `Terminal` as the `langgraph_adapter` for human interaction.
56
+
57
+ ```python
58
+ import os
59
+ from langchain.chat_models import init_chat_model
60
+ from typing import Annotated
61
+
62
+ from langchain_tavily import TavilySearch
63
+ from langchain_core.tools import tool
64
+ from typing_extensions import TypedDict
65
+
66
+ from langgraph.checkpoint.memory import MemorySaver
67
+ from langgraph.graph import StateGraph, START, END
68
+ from langgraph.graph.message import add_messages
69
+ from langgraph.prebuilt import ToolNode, tools_condition
70
+
71
+ # from langgraph.types import Command, interrupt # Don't use langgraph, use gohumanloop instead
72
+
73
+ from gohumanloop.adapters.langgraph_adapter import interrupt, create_resume_command
74
+
75
+ os.environ["DEEPSEEK_API_KEY"] = "sk-xxx"
76
+ os.environ["TAVILY_API_KEY"] = "tvly-xxx"
77
+
78
+ llm = init_chat_model("deepseek:deepseek-chat")
79
+
80
+ class State(TypedDict):
81
+ messages: Annotated[list, add_messages]
82
+
83
+ graph_builder = StateGraph(State)
84
+
85
+ @tool
86
+ def human_assistance(query: str) -> str:
87
+ """Request assistance from a human."""
88
+ human_response = interrupt({"query": query})
89
+ return human_response
90
+
91
+ tool = TavilySearch(max_results=2)
92
+ tools = [tool, human_assistance]
93
+ llm_with_tools = llm.bind_tools(tools)
94
+
95
+ def chatbot(state: State):
96
+ message = llm_with_tools.invoke(state["messages"])
97
+ # Because we will be interrupting during tool execution,
98
+ # we disable parallel tool calling to avoid repeating any
99
+ # tool invocations when we resume.
100
+ assert len(message.tool_calls) <= 1
101
+ return {"messages": [message]}
102
+
103
+ graph_builder.add_node("chatbot", chatbot)
104
+
105
+ tool_node = ToolNode(tools=tools)
106
+ graph_builder.add_node("tools", tool_node)
107
+
108
+ graph_builder.add_conditional_edges(
109
+ "chatbot",
110
+ tools_condition,
111
+ )
112
+ graph_builder.add_edge("tools", "chatbot")
113
+ graph_builder.add_edge(START, "chatbot")
114
+
115
+ memory = MemorySaver()
116
+
117
+ graph = graph_builder.compile(checkpointer=memory)
118
+
119
+ user_input = "I need some expert guidance for building an AI agent. Could you request assistance for me?"
120
+ config = {"configurable": {"thread_id": "1"}}
121
+
122
+ events = graph.stream(
123
+ {"messages": [{"role": "user", "content": user_input}]},
124
+ config,
125
+ stream_mode="values",
126
+ )
127
+ for event in events:
128
+ if "messages" in event:
129
+ event["messages"][-1].pretty_print()
130
+
131
+ # LangGraph code:
132
+ # human_response = (
133
+ # "We, the experts are here to help! We'd recommend you check out LangGraph to build your agent."
134
+ # "It's much more reliable and extensible than simple autonomous agents."
135
+ # )
136
+
137
+ # human_command = Command(resume={"data": human_response})
138
+
139
+ # GoHumanLoop code:
140
+ human_command = create_resume_command() # Use this command to resume the execution, instead of using the command above
141
+
142
+ events = graph.stream(human_command, config, stream_mode="values")
143
+ for event in events:
144
+ if "messages" in event:
145
+ event["messages"][-1].pretty_print()
146
+
147
+ ```
148
+
149
+ ## 🎵 Why GoHumanloop?
150
+
151
+ ### Human-in-the-loop
152
+
153
+ <div align="center">
154
+ <img height=240 src="http://cdn.oyster-iot.cloud/202505210851404.png"><br>
155
+ <b face="雅黑">Even with state-of-the-art agentic reasoning and prompt routing, LLMs are not sufficiently reliable to be given access to high-stakes functions without human oversight</b>
156
+ </div>
157
+ <br>
158
+
159
+ `Human-in-the-loop` is an AI system design philosophy that integrates human judgment and supervision into AI decision-making processes. This concept is particularly important in AI Agent systems:
160
+
161
+ - **Safety Assurance**: Allows human intervention and review at critical decision points to prevent potentially harmful AI decisions
162
+ - **Quality Control**: Improves accuracy and reliability of AI outputs through expert feedback
163
+ - **Continuous Learning**: AI systems can learn and improve from human feedback, creating a virtuous cycle
164
+ - **Clear Accountability**: Maintains ultimate human control over important decisions with clear responsibility
165
+
166
+ In practice, Human-in-the-loop can take various forms - from simple decision confirmation to deep human-AI collaborative dialogues - ensuring optimal balance between autonomy and human oversight to maximize the potential of AI Agent systems.
167
+
168
+ #### Typical Use Cases
169
+
170
+ <div align="center">
171
+ <img height=120 src="http://cdn.oyster-iot.cloud/tool-call-review.png"><br>
172
+ <b face="雅黑"> A human can review and edit the output from the agent before proceeding. This is particularly critical in applications where the tool calls requested may be sensitive or require human oversight.</b>
173
+ </div>
174
+ <br>
175
+
176
+ - 🛠️ Tool Call Review: Humans can review, edit or approve tool call requests initiated by LLMs before execution
177
+ - ✅ Model Output Verification: Humans can review, edit or approve content generated by LLMs (text, decisions, etc.)
178
+ - 💡 Context Provision: Allows LLMs to actively request human input for clarification, additional details or multi-turn conversation context
179
+
180
+ ### Secure and Efficient Go➡Humanloop
181
+
182
+ `GoHumanloop` provides a set of tools deeply integrated within AI Agents to ensure constant `Human-in-the-loop` oversight. It deterministically ensures high-risk function calls must undergo human review while also enabling human expert feedback, thereby improving AI system reliability and safety while reducing risks from LLM hallucinations.
183
+
184
+ <div align="center">
185
+ <img height=420 src="http://cdn.oyster-iot.cloud/202505210943862.png"><br>
186
+ <b face="雅黑"> The Outer-Loop and Inversion of Control</b>
187
+ </div>
188
+ <br>
189
+
190
+ Through `GoHumanloop`'s encapsulation, you can implement secure and efficient `Human-in-the-loop` when requesting tools, Agent nodes, MCP services and other Agents.
191
+
192
+ ## 📚 Key Features
193
+
194
+ <div align="center">
195
+ <img height=360 src="http://cdn.oyster-iot.cloud/202505211030197.png"><br>
196
+ <b face="雅黑"> GoHumanLoop Architecture</b>
197
+ </div>
198
+ <br>
199
+
200
+ `GoHumanloop` offers the following core capabilities:
201
+
202
+ - **Approval:** Requests human review or approval when executing specific tool calls or Agent nodes
203
+ - **Information:** Obtains critical human input during task execution to reduce LLM hallucination risks
204
+ - **Conversation:** Enables multi-turn interactions with humans through dialogue to acquire richer contextual information
205
+ - **Framework-specific Integration:** Provides specialized integration methods for specific Agent frameworks, such as `interrupt` and `resume` for `LangGraph`
206
+
207
+ ## 📅 Roadmap
208
+
209
+ | Feature | Status |
210
+ | ----------------- | ---------- |
211
+ | Approval | ⚙️ Beta |
212
+ | Information | ⚙️ Beta |
213
+ | Conversation | ⚙️ Beta |
214
+ | Email Provider | ⚙️ Beta |
215
+ | Terminal Provider | ⚙️ Beta |
216
+ | API Provider | ⚙️ Beta |
217
+ | Default Manager | ⚙️ Beta |
218
+ | GLH Manager | 🗓️ Planned |
219
+ | Langchain Support | ⚙️ Beta |
220
+ | CrewAI Support | 🗓️ Planned |
221
+
222
+ - 💡 GLH Manager - GoHumanLoop Manager will integrate with the upcoming GoHumanLoop Hub platform to provide users with more flexible management options.
223
+
224
+ ## 🤝 Contributing
225
+
226
+ The GoHumanLoop SDK and documentation are open source. We welcome contributions in the form of issues, documentation and PRs. For more details, please see [CONTRIBUTING.md](./CONTRIBUTING.md)
227
+
228
+ ## 📱 Contact
229
+
230
+ <img height=300 src="http://cdn.oyster-iot.cloud/202505231802103.png"/>
231
+
232
+ 🎉 If you're interested in this project, feel free to scan the QR code to contact the author.
233
+
234
+ ## 🌟 Star History
235
+
236
+ [![Star History Chart](https://api.star-history.com/svg?repos=gohumanloop/gohumanloop&type=Date)](https://www.star-history.com/#gohumanloop/gohumanloop&Date)
@@ -12,24 +12,32 @@ from gohumanloop.manager.ghl_manager import GoHumanLoopManager
12
12
 
13
13
  from gohumanloop.providers.ghl_provider import GoHumanLoopProvider
14
14
  from gohumanloop.providers.api_provider import APIProvider
15
- from gohumanloop.providers.email_provider import EmailProvider
16
15
  from gohumanloop.providers.base import BaseProvider
17
16
  from gohumanloop.providers.terminal_provider import TerminalProvider
18
17
 
19
18
  from gohumanloop.utils import run_async_safely, get_secret_from_env
20
19
 
20
+ # Conditionally import EmailProvider
21
+ try:
22
+ from gohumanloop.providers.email_provider import EmailProvider # noqa: F401
23
+
24
+ _has_email = True
25
+ except ImportError:
26
+ _has_email = False
27
+
21
28
  # Dynamically get version number
22
29
  try:
23
30
  from importlib.metadata import version, PackageNotFoundError
31
+
24
32
  try:
25
33
  __version__ = version("gohumanloop")
26
34
  except PackageNotFoundError:
27
35
  import os
28
36
  import tomli
29
-
37
+
30
38
  root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
31
39
  pyproject_path = os.path.join(root_dir, "pyproject.toml")
32
-
40
+
33
41
  with open(pyproject_path, "rb") as f:
34
42
  pyproject_data = tomli.load(f)
35
43
  __version__ = pyproject_data["project"]["version"]
@@ -44,22 +52,20 @@ __all__ = [
44
52
  "HumanLoopResult",
45
53
  "HumanLoopStatus",
46
54
  "HumanLoopType",
47
-
48
55
  # Manager Implementations
49
56
  "DefaultHumanLoopManager",
50
57
  "GoHumanLoopManager",
51
-
52
58
  # Provider Implementations
53
59
  "BaseProvider",
54
60
  "APIProvider",
55
61
  "GoHumanLoopProvider",
56
- "EmailProvider",
57
62
  "TerminalProvider",
58
-
59
63
  # Utility Functions
60
64
  "run_async_safely",
61
65
  "get_secret_from_env",
62
-
63
66
  # Version Information
64
67
  "__version__",
65
- ]
68
+ ]
69
+
70
+ if _has_email:
71
+ __all__.append("EmailProvider")
@@ -4,8 +4,8 @@ from .langgraph_adapter import (
4
4
  default_langgraph_callback_factory,
5
5
  interrupt,
6
6
  create_resume_command,
7
- acreate_resume_command
8
- )
7
+ acreate_resume_command,
8
+ )
9
9
 
10
10
  __all__ = [
11
11
  "LangGraphAdapter",
@@ -13,5 +13,5 @@ __all__ = [
13
13
  "default_langgraph_callback_factory",
14
14
  "interrupt",
15
15
  "create_resume_command",
16
- "acreate_resume_command"
17
- ]
16
+ "acreate_resume_command",
17
+ ]