haive-hap 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. haive_hap-1.0.0/LICENSE +21 -0
  2. haive_hap-1.0.0/PKG-INFO +309 -0
  3. haive_hap-1.0.0/README.md +290 -0
  4. haive_hap-1.0.0/pyproject.toml +72 -0
  5. haive_hap-1.0.0/src/haive/hap/README.md +118 -0
  6. haive_hap-1.0.0/src/haive/hap/__init__.py +66 -0
  7. haive_hap-1.0.0/src/haive/hap/cli/__init__.py +0 -0
  8. haive_hap-1.0.0/src/haive/hap/client/__init__.py +5 -0
  9. haive_hap-1.0.0/src/haive/hap/client/base.py +8 -0
  10. haive_hap-1.0.0/src/haive/hap/client/http.py +31 -0
  11. haive_hap-1.0.0/src/haive/hap/client/local.py +21 -0
  12. haive_hap-1.0.0/src/haive/hap/context.py +295 -0
  13. haive_hap-1.0.0/src/haive/hap/models/README.md +86 -0
  14. haive_hap-1.0.0/src/haive/hap/models/__init__.py +12 -0
  15. haive_hap-1.0.0/src/haive/hap/models/context.py +69 -0
  16. haive_hap-1.0.0/src/haive/hap/models/graph.py +184 -0
  17. haive_hap-1.0.0/src/haive/hap/models/manifest.py +11 -0
  18. haive_hap-1.0.0/src/haive/hap/server/README.md +103 -0
  19. haive_hap-1.0.0/src/haive/hap/server/__init__.py +0 -0
  20. haive_hap-1.0.0/src/haive/hap/server/runtime.py +87 -0
  21. haive_hap-1.0.0/src/haive/hap/server/typing.py +5 -0
  22. haive_hap-1.0.0/src/haive/hap/server.py +535 -0
  23. haive_hap-1.0.0/src/haive/hap/servers/__init__.py +9 -0
  24. haive_hap-1.0.0/src/haive/hap/servers/agent.py +421 -0
  25. haive_hap-1.0.0/src/haive/hap/transports/__init__.py +10 -0
  26. haive_hap-1.0.0/src/haive/hap/transports/stdio.py +267 -0
  27. haive_hap-1.0.0/src/haive/hap/types/__init__.py +55 -0
  28. haive_hap-1.0.0/src/haive/hap/types/agents.py +186 -0
  29. haive_hap-1.0.0/src/haive/hap/types/aliases.py +4 -0
  30. haive_hap-1.0.0/src/haive/hap/types/engines.py +123 -0
  31. haive_hap-1.0.0/src/haive/hap/types/graphs.py +177 -0
  32. haive_hap-1.0.0/src/haive/hap/types/nodes.py +167 -0
  33. haive_hap-1.0.0/src/haive/hap/types/protocol.py +978 -0
  34. haive_hap-1.0.0/src/haive/hap/types/schemas.py +176 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Will
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,309 @@
1
+ Metadata-Version: 2.3
2
+ Name: haive-hap
3
+ Version: 1.0.0
4
+ Summary: Haive Agent Protocol - MCP for Agents
5
+ Author: 0rac130fD31phi
6
+ Author-email: william.astley@algebraicwealth.com
7
+ Requires-Python: >=3.12,<3.13
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: Programming Language :: Python :: 3.12
10
+ Requires-Dist: aiofiles (>=23.0,<24.0)
11
+ Requires-Dist: aiohttp (>=3.9,<4.0)
12
+ Requires-Dist: click (>=8.1,<9.0)
13
+ Requires-Dist: haive-agents (>=1.0.0,<2.0.0)
14
+ Requires-Dist: haive-core (>=1.0.0,<2.0.0)
15
+ Requires-Dist: pydantic (>=2.0,<3.0)
16
+ Requires-Dist: rich (>=13.0)
17
+ Requires-Dist: typing-extensions (>=4.8,<5.0)
18
+ Description-Content-Type: text/markdown
19
+
20
+ # HAP - Haive Agent Protocol
21
+
22
+ **"MCP for Agents"** - A protocol for orchestrating multiple AI agents in complex workflows.
23
+
24
+ ## ๐ŸŽฏ Overview
25
+
26
+ HAP (Haive Agent Protocol) enables seamless orchestration of multiple AI agents, similar to how MCP (Model Context Protocol) exposes tools and resources. It provides a standardized way to coordinate agents working together on complex multi-step problems that no single agent can solve alone.
27
+
28
+ ### Key Features
29
+
30
+ - 🔄 **Multi-Agent Orchestration** - Sequential, parallel, and conditional workflows
31
+ - 🌐 **Protocol-Based Communication** - JSON-RPC 2.0 standard for remote agents
32
+ - 📊 **State Management** - Context flows seamlessly between agents
33
+ - 🛠️ **Tool Integration** - Access individual agent tools directly
34
+ - 📈 **Performance Monitoring** - Track execution times and statistics
35
+ - 🔌 **Flexible Deployment** - Local in-process or distributed network execution
36
+
37
+ ## 🚀 Quick Start
38
+
39
+ ### Basic Agent Workflow
40
+
41
+ ```python
42
+ from haive.hap.models.graph import HAPGraph
43
+ from haive.hap.server.runtime import HAPRuntime
44
+ from haive.agents.simple import SimpleAgent
45
+ from haive.core.engine.aug_llm import AugLLMConfig
46
+
47
+ # Create specialized agents
48
+ researcher = SimpleAgent(
49
+ name="researcher",
50
+ engine=AugLLMConfig(
51
+ temperature=0.7,
52
+ system_message="You gather comprehensive information on topics."
53
+ )
54
+ )
55
+
56
+ analyst = SimpleAgent(
57
+ name="analyst",
58
+ engine=AugLLMConfig(
59
+ temperature=0.3,
60
+ system_message="You analyze findings and identify key insights."
61
+ )
62
+ )
63
+
64
+ # Build workflow graph
65
+ graph = HAPGraph()
66
+ graph.add_agent_node("research", researcher, next_nodes=["analyze"])
67
+ graph.add_agent_node("analyze", analyst)
68
+ graph.entry_node = "research"
69
+
70
+ # Execute workflow
71
+ runtime = HAPRuntime(graph)
72
+ result = await runtime.run({"topic": "AI in Healthcare"})
73
+
74
+ print(f"Workflow path: {result.execution_path}")
75
+ print(f"Final output: {result.outputs}")
76
+ ```
77
+
78
+ ### Parallel Agent Execution
79
+
80
+ ```python
81
+ # Create agents for parallel analysis
82
+ sentiment_agent = SimpleAgent(name="sentiment", engine=sentiment_config)
83
+ topic_agent = SimpleAgent(name="topics", engine=topic_config)
84
+ summary_agent = SimpleAgent(name="summary", engine=summary_config)
85
+
86
+ # Build parallel workflow
87
+ graph = HAPGraph()
88
+ graph.add_agent_node("sentiment", sentiment_agent, next_nodes=["combine"])
89
+ graph.add_agent_node("topics", topic_agent, next_nodes=["combine"])
90
+ graph.add_agent_node("summary", summary_agent, next_nodes=["combine"])
91
+ graph.add_agent_node("combine", combiner_agent)
92
+
93
+ # Multiple entry points for parallel execution
94
+ graph.entry_node = ["sentiment", "topics", "summary"]
95
+
96
+ runtime = HAPRuntime(graph)
97
+ result = await runtime.run({"text": "Customer feedback data..."})
98
+ ```
99
+
100
+ ### Agent as Service
101
+
102
+ ```python
103
+ from haive.hap.servers.agent import AgentServer
104
+
105
+ # Wrap any agent as HAP server
106
+ server = AgentServer(
107
+ agent=my_agent,
108
+ expose_tools=True, # Expose individual tools
109
+ expose_state=True # Expose agent state
110
+ )
111
+
112
+ # Access agent capabilities
113
+ info = server.get_resource("agent://my_agent/info")
114
+ stats = server.get_resource("agent://my_agent/stats")
115
+
116
+ # Execute agent
117
+ result = await server.execute({
118
+ "input_data": {"query": "Process this request"},
119
+ "timeout": 30.0
120
+ })
121
+ ```
122
+
123
+ ## 📦 Installation
124
+
125
+ ```bash
126
+ # Install with poetry
127
+ poetry add haive-hap
128
+
129
+ # Or with pip
130
+ pip install haive-hap
131
+ ```
132
+
133
+ ## ๐Ÿ—๏ธ Architecture
134
+
135
+ ### Core Components
136
+
137
+ 1. **Graph Definition** (`models/graph.py`)
138
+ - `HAPGraph` - Define agent workflows as directed graphs
139
+ - `HAPNode` - Individual workflow nodes (agents, tools, decisions)
140
+ - Support for sequential, parallel, and conditional execution
141
+
142
+ 2. **Runtime Engine** (`server/runtime.py`)
143
+ - `HAPRuntime` - Execute workflows with state management
144
+ - Automatic agent loading from entrypoints
145
+ - Error handling and recovery
146
+
147
+ 3. **Protocol Layer** (`types/protocol.py`)
148
+ - JSON-RPC 2.0 compliant messaging
149
+ - Comprehensive Pydantic validation
150
+ - Standard error codes and handling
151
+
152
+ 4. **Agent Server** (`servers/agent.py`)
153
+ - Expose any Haive agent as HAP service
154
+ - Tool-level granularity
155
+ - Resource endpoints for state and config
156
+
157
+ ## 📊 Real-World Use Cases
158
+
159
+ ### 1. Research Pipeline
160
+
161
+ ```python
162
+ # Research → Analysis → Report Generation
163
+ graph = HAPGraph()
164
+ graph.add_agent_node("research", research_agent, next_nodes=["analyze"])
165
+ graph.add_agent_node("analyze", analysis_agent, next_nodes=["report"])
166
+ graph.add_agent_node("report", report_agent)
167
+ ```
168
+
169
+ ### 2. Customer Support Workflow
170
+
171
+ ```python
172
+ # Classify → Route → Respond → Follow-up
173
+ graph = HAPGraph()
174
+ graph.add_agent_node("classify", classifier, next_nodes=["technical", "billing", "general"])
175
+ graph.add_agent_node("technical", tech_agent, next_nodes=["followup"])
176
+ graph.add_agent_node("billing", billing_agent, next_nodes=["followup"])
177
+ graph.add_agent_node("general", general_agent, next_nodes=["followup"])
178
+ graph.add_agent_node("followup", followup_agent)
179
+ ```
180
+
181
+ ### 3. Content Creation Pipeline
182
+
183
+ ```python
184
+ # Plan → Research → Write → Edit → Publish
185
+ graph = HAPGraph()
186
+ graph.add_agent_node("plan", planner, next_nodes=["research"])
187
+ graph.add_agent_node("research", researcher, next_nodes=["write"])
188
+ graph.add_agent_node("write", writer, next_nodes=["edit"])
189
+ graph.add_agent_node("edit", editor, next_nodes=["publish"])
190
+ graph.add_agent_node("publish", publisher)
191
+ ```
192
+
193
+ ## 🔧 Advanced Features
194
+
195
+ ### Dynamic Agent Loading
196
+
197
+ ```python
198
+ # Load agents from entrypoints (for distributed deployment)
199
+ graph = HAPGraph()
200
+ graph.add_entrypoint_node(
201
+ "analyzer",
202
+ "mypackage.agents:AnalyzerAgent",
203
+ config={"temperature": 0.3}
204
+ )
205
+ ```
206
+
207
+ ### Conditional Routing
208
+
209
+ ```python
210
+ # Route based on agent output
211
+ def routing_function(context):
212
+ score = context.outputs.get("confidence", 0)
213
+ return "expert" if score < 0.7 else "finalize"
214
+
215
+ graph.add_decision_node(
216
+ "router",
217
+ decision_func=routing_function,
218
+ next_nodes={"expert": "expert_agent", "finalize": "final_agent"}
219
+ )
220
+ ```
221
+
222
+ ### State Persistence
223
+
224
+ ```python
225
+ # Save and restore workflow state
226
+ runtime = HAPRuntime(graph, state_file="workflow_state.json")
227
+
228
+ # Resume from checkpoint
229
+ result = await runtime.resume_from_checkpoint()
230
+ ```
231
+
232
+ ## 📈 Performance Monitoring
233
+
234
+ ```python
235
+ # Get execution statistics
236
+ stats = runtime.get_stats()
237
+ print(f"Total executions: {stats['execution_count']}")
238
+ print(f"Average time: {stats['average_execution_time']:.2f}s")
239
+
240
+ # Per-node performance
241
+ for node, metadata in result.node_metadata.items():
242
+ print(f"{node}: {metadata['execution_time']:.2f}s")
243
+ ```
244
+
245
+ ## 🧪 Testing
246
+
247
+ ```bash
248
+ # Run all tests (real agents, no mocks)
249
+ poetry run pytest
250
+
251
+ # Test specific components
252
+ poetry run pytest tests/test_graph_execution.py -v
253
+ poetry run pytest tests/test_agent_server.py -v
254
+ poetry run pytest tests/test_hap_workflow.py -v
255
+
256
+ # Integration tests
257
+ poetry run pytest tests/integration/ -v
258
+ ```
259
+
260
+ ## 📚 Documentation
261
+
262
+ ### Building Documentation
263
+
264
+ ```bash
265
+ # Build enhanced Sphinx docs
266
+ cd docs
267
+ poetry run sphinx-build -b html source build
268
+
269
+ # View locally
270
+ python -m http.server 8003 --directory build
271
+ ```
272
+
273
+ ### Documentation Features
274
+
275
+ - 🎨 Enhanced Furo theme with purple/violet styling
276
+ - 💡 Interactive tooltips and code examples
277
+ - 📱 Mobile-optimized responsive design
278
+ - 🔍 Advanced search with syntax highlighting
279
+ - 📖 Hierarchical API reference organization
280
+
281
+ ## ๐Ÿค Contributing
282
+
283
+ 1. Fork the repository
284
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
285
+ 3. Write tests for your changes (no mocks!)
286
+ 4. Ensure all tests pass (`poetry run pytest`)
287
+ 5. Commit your changes (`git commit -m 'feat: add amazing feature'`)
288
+ 6. Push to the branch (`git push origin feature/amazing-feature`)
289
+ 7. Open a Pull Request
290
+
291
+ ## 📄 License
292
+
293
+ MIT License - see [LICENSE](LICENSE) for details.
294
+
295
+ ## 🔮 Roadmap
296
+
297
+ - [ ] **v0.2.0** - WebSocket transport for real-time updates
298
+ - [ ] **v0.3.0** - Distributed workflow execution
299
+ - [ ] **v0.4.0** - Visual workflow builder UI
300
+ - [ ] **v0.5.0** - GraphQL API interface
301
+ - [ ] **v1.0.0** - Production-ready with performance optimizations
302
+
303
+ ## ๐Ÿ™ Acknowledgments
304
+
305
+ HAP is inspired by the Model Context Protocol (MCP) but adapted specifically for agent orchestration. Special thanks to the Haive framework team for providing the foundation for advanced agent development.
306
+
307
+ ---
308
+
309
+ **HAP** - Orchestrating AI agents with elegance and power. 🚀
@@ -0,0 +1,290 @@
1
+ # HAP - Haive Agent Protocol
2
+
3
+ **"MCP for Agents"** - A protocol for orchestrating multiple AI agents in complex workflows.
4
+
5
+ ## 🎯 Overview
6
+
7
+ HAP (Haive Agent Protocol) enables seamless orchestration of multiple AI agents, similar to how MCP (Model Context Protocol) exposes tools and resources. It provides a standardized way to coordinate agents working together on complex multi-step problems that no single agent can solve alone.
8
+
9
+ ### Key Features
10
+
11
+ - 🔄 **Multi-Agent Orchestration** - Sequential, parallel, and conditional workflows
12
+ - 🌐 **Protocol-Based Communication** - JSON-RPC 2.0 standard for remote agents
13
+ - 📊 **State Management** - Context flows seamlessly between agents
14
+ - 🛠️ **Tool Integration** - Access individual agent tools directly
15
+ - 📈 **Performance Monitoring** - Track execution times and statistics
16
+ - 🔌 **Flexible Deployment** - Local in-process or distributed network execution
17
+
18
+ ## 🚀 Quick Start
19
+
20
+ ### Basic Agent Workflow
21
+
22
+ ```python
23
+ from haive.hap.models.graph import HAPGraph
24
+ from haive.hap.server.runtime import HAPRuntime
25
+ from haive.agents.simple import SimpleAgent
26
+ from haive.core.engine.aug_llm import AugLLMConfig
27
+
28
+ # Create specialized agents
29
+ researcher = SimpleAgent(
30
+ name="researcher",
31
+ engine=AugLLMConfig(
32
+ temperature=0.7,
33
+ system_message="You gather comprehensive information on topics."
34
+ )
35
+ )
36
+
37
+ analyst = SimpleAgent(
38
+ name="analyst",
39
+ engine=AugLLMConfig(
40
+ temperature=0.3,
41
+ system_message="You analyze findings and identify key insights."
42
+ )
43
+ )
44
+
45
+ # Build workflow graph
46
+ graph = HAPGraph()
47
+ graph.add_agent_node("research", researcher, next_nodes=["analyze"])
48
+ graph.add_agent_node("analyze", analyst)
49
+ graph.entry_node = "research"
50
+
51
+ # Execute workflow
52
+ runtime = HAPRuntime(graph)
53
+ result = await runtime.run({"topic": "AI in Healthcare"})
54
+
55
+ print(f"Workflow path: {result.execution_path}")
56
+ print(f"Final output: {result.outputs}")
57
+ ```
58
+
59
+ ### Parallel Agent Execution
60
+
61
+ ```python
62
+ # Create agents for parallel analysis
63
+ sentiment_agent = SimpleAgent(name="sentiment", engine=sentiment_config)
64
+ topic_agent = SimpleAgent(name="topics", engine=topic_config)
65
+ summary_agent = SimpleAgent(name="summary", engine=summary_config)
66
+
67
+ # Build parallel workflow
68
+ graph = HAPGraph()
69
+ graph.add_agent_node("sentiment", sentiment_agent, next_nodes=["combine"])
70
+ graph.add_agent_node("topics", topic_agent, next_nodes=["combine"])
71
+ graph.add_agent_node("summary", summary_agent, next_nodes=["combine"])
72
+ graph.add_agent_node("combine", combiner_agent)
73
+
74
+ # Multiple entry points for parallel execution
75
+ graph.entry_node = ["sentiment", "topics", "summary"]
76
+
77
+ runtime = HAPRuntime(graph)
78
+ result = await runtime.run({"text": "Customer feedback data..."})
79
+ ```
80
+
81
+ ### Agent as Service
82
+
83
+ ```python
84
+ from haive.hap.servers.agent import AgentServer
85
+
86
+ # Wrap any agent as HAP server
87
+ server = AgentServer(
88
+ agent=my_agent,
89
+ expose_tools=True, # Expose individual tools
90
+ expose_state=True # Expose agent state
91
+ )
92
+
93
+ # Access agent capabilities
94
+ info = server.get_resource("agent://my_agent/info")
95
+ stats = server.get_resource("agent://my_agent/stats")
96
+
97
+ # Execute agent
98
+ result = await server.execute({
99
+ "input_data": {"query": "Process this request"},
100
+ "timeout": 30.0
101
+ })
102
+ ```
103
+
104
+ ## 📦 Installation
105
+
106
+ ```bash
107
+ # Install with poetry
108
+ poetry add haive-hap
109
+
110
+ # Or with pip
111
+ pip install haive-hap
112
+ ```
113
+
114
+ ## ๐Ÿ—๏ธ Architecture
115
+
116
+ ### Core Components
117
+
118
+ 1. **Graph Definition** (`models/graph.py`)
119
+ - `HAPGraph` - Define agent workflows as directed graphs
120
+ - `HAPNode` - Individual workflow nodes (agents, tools, decisions)
121
+ - Support for sequential, parallel, and conditional execution
122
+
123
+ 2. **Runtime Engine** (`server/runtime.py`)
124
+ - `HAPRuntime` - Execute workflows with state management
125
+ - Automatic agent loading from entrypoints
126
+ - Error handling and recovery
127
+
128
+ 3. **Protocol Layer** (`types/protocol.py`)
129
+ - JSON-RPC 2.0 compliant messaging
130
+ - Comprehensive Pydantic validation
131
+ - Standard error codes and handling
132
+
133
+ 4. **Agent Server** (`servers/agent.py`)
134
+ - Expose any Haive agent as HAP service
135
+ - Tool-level granularity
136
+ - Resource endpoints for state and config
137
+
138
+ ## 📊 Real-World Use Cases
139
+
140
+ ### 1. Research Pipeline
141
+
142
+ ```python
143
+ # Research → Analysis → Report Generation
144
+ graph = HAPGraph()
145
+ graph.add_agent_node("research", research_agent, next_nodes=["analyze"])
146
+ graph.add_agent_node("analyze", analysis_agent, next_nodes=["report"])
147
+ graph.add_agent_node("report", report_agent)
148
+ ```
149
+
150
+ ### 2. Customer Support Workflow
151
+
152
+ ```python
153
+ # Classify → Route → Respond → Follow-up
154
+ graph = HAPGraph()
155
+ graph.add_agent_node("classify", classifier, next_nodes=["technical", "billing", "general"])
156
+ graph.add_agent_node("technical", tech_agent, next_nodes=["followup"])
157
+ graph.add_agent_node("billing", billing_agent, next_nodes=["followup"])
158
+ graph.add_agent_node("general", general_agent, next_nodes=["followup"])
159
+ graph.add_agent_node("followup", followup_agent)
160
+ ```
161
+
162
+ ### 3. Content Creation Pipeline
163
+
164
+ ```python
165
+ # Plan → Research → Write → Edit → Publish
166
+ graph = HAPGraph()
167
+ graph.add_agent_node("plan", planner, next_nodes=["research"])
168
+ graph.add_agent_node("research", researcher, next_nodes=["write"])
169
+ graph.add_agent_node("write", writer, next_nodes=["edit"])
170
+ graph.add_agent_node("edit", editor, next_nodes=["publish"])
171
+ graph.add_agent_node("publish", publisher)
172
+ ```
173
+
174
+ ## 🔧 Advanced Features
175
+
176
+ ### Dynamic Agent Loading
177
+
178
+ ```python
179
+ # Load agents from entrypoints (for distributed deployment)
180
+ graph = HAPGraph()
181
+ graph.add_entrypoint_node(
182
+ "analyzer",
183
+ "mypackage.agents:AnalyzerAgent",
184
+ config={"temperature": 0.3}
185
+ )
186
+ ```
187
+
188
+ ### Conditional Routing
189
+
190
+ ```python
191
+ # Route based on agent output
192
+ def routing_function(context):
193
+ score = context.outputs.get("confidence", 0)
194
+ return "expert" if score < 0.7 else "finalize"
195
+
196
+ graph.add_decision_node(
197
+ "router",
198
+ decision_func=routing_function,
199
+ next_nodes={"expert": "expert_agent", "finalize": "final_agent"}
200
+ )
201
+ ```
202
+
203
+ ### State Persistence
204
+
205
+ ```python
206
+ # Save and restore workflow state
207
+ runtime = HAPRuntime(graph, state_file="workflow_state.json")
208
+
209
+ # Resume from checkpoint
210
+ result = await runtime.resume_from_checkpoint()
211
+ ```
212
+
213
+ ## 📈 Performance Monitoring
214
+
215
+ ```python
216
+ # Get execution statistics
217
+ stats = runtime.get_stats()
218
+ print(f"Total executions: {stats['execution_count']}")
219
+ print(f"Average time: {stats['average_execution_time']:.2f}s")
220
+
221
+ # Per-node performance
222
+ for node, metadata in result.node_metadata.items():
223
+ print(f"{node}: {metadata['execution_time']:.2f}s")
224
+ ```
225
+
226
+ ## 🧪 Testing
227
+
228
+ ```bash
229
+ # Run all tests (real agents, no mocks)
230
+ poetry run pytest
231
+
232
+ # Test specific components
233
+ poetry run pytest tests/test_graph_execution.py -v
234
+ poetry run pytest tests/test_agent_server.py -v
235
+ poetry run pytest tests/test_hap_workflow.py -v
236
+
237
+ # Integration tests
238
+ poetry run pytest tests/integration/ -v
239
+ ```
240
+
241
+ ## 📚 Documentation
242
+
243
+ ### Building Documentation
244
+
245
+ ```bash
246
+ # Build enhanced Sphinx docs
247
+ cd docs
248
+ poetry run sphinx-build -b html source build
249
+
250
+ # View locally
251
+ python -m http.server 8003 --directory build
252
+ ```
253
+
254
+ ### Documentation Features
255
+
256
+ - 🎨 Enhanced Furo theme with purple/violet styling
257
+ - 💡 Interactive tooltips and code examples
258
+ - 📱 Mobile-optimized responsive design
259
+ - 🔍 Advanced search with syntax highlighting
260
+ - 📖 Hierarchical API reference organization
261
+
262
+ ## ๐Ÿค Contributing
263
+
264
+ 1. Fork the repository
265
+ 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
266
+ 3. Write tests for your changes (no mocks!)
267
+ 4. Ensure all tests pass (`poetry run pytest`)
268
+ 5. Commit your changes (`git commit -m 'feat: add amazing feature'`)
269
+ 6. Push to the branch (`git push origin feature/amazing-feature`)
270
+ 7. Open a Pull Request
271
+
272
+ ## 📄 License
273
+
274
+ MIT License - see [LICENSE](LICENSE) for details.
275
+
276
+ ## 🔮 Roadmap
277
+
278
+ - [ ] **v0.2.0** - WebSocket transport for real-time updates
279
+ - [ ] **v0.3.0** - Distributed workflow execution
280
+ - [ ] **v0.4.0** - Visual workflow builder UI
281
+ - [ ] **v0.5.0** - GraphQL API interface
282
+ - [ ] **v1.0.0** - Production-ready with performance optimizations
283
+
284
+ ## ๐Ÿ™ Acknowledgments
285
+
286
+ HAP is inspired by the Model Context Protocol (MCP) but adapted specifically for agent orchestration. Special thanks to the Haive framework team for providing the foundation for advanced agent development.
287
+
288
+ ---
289
+
290
+ **HAP** - Orchestrating AI agents with elegance and power. 🚀
@@ -0,0 +1,72 @@
1
+ [tool.poetry]
2
+ name = "haive-hap"
3
+ version = "1.0.0"
4
+ description = "Haive Agent Protocol - MCP for Agents"
5
+ authors = ["0rac130fD31phi <william.astley@algebraicwealth.com>"]
6
+ readme = "README.md"
7
+ packages = [{ include = "haive", from = "src" }]
8
+
9
+ [tool.poetry.dependencies]
10
+ python = ">=3.12,<3.13"
11
+ pydantic = "^2.0"
12
+ typing-extensions = "^4.8"
13
+ aiofiles = "^23.0"
14
+ aiohttp = "^3.9"
15
+ click = "^8.1"
16
+ rich = ">=13.0"
17
+ # Editable dependencies for monorepo cross-package imports
18
+ haive-core = "^1.0.0"
19
+ haive-agents = "^1.0.0"
20
+
21
+ [tool.poetry.group.dev.dependencies]
22
+ absolufy-imports = "^0.3.1"
23
+ pytest = "^7.4"
24
+ pytest-asyncio = "^0.21"
25
+ pytest-mock = "^3.12"
26
+ httpx = "^0.28"
27
+ rope = "^1.14.0"
28
+
29
+ [tool.poetry.group.docs]
30
+ optional = true
31
+
32
+ [tool.poetry.group.docs.dependencies]
33
+ # Documentation dependencies
34
+ sphinx = "^8.2.3"
35
+ sphinx-autoapi = "^3.6.0"
36
+ sphinx-autodoc-typehints = "^3.1.0"
37
+ furo = "^2024.8.6"
38
+ myst-parser = "^4.0.1"
39
+ sphinx-copybutton = "^0.5.2"
40
+ sphinx-togglebutton = "^0.3.2"
41
+ sphinx-design = "^0.6.1"
42
+ sphinx-tabs = "^3.4.5"
43
+ sphinx-inline-tabs = "^2023.4.21"
44
+ sphinxcontrib-mermaid = "^1.0.0"
45
+ sphinxcontrib-plantuml = "^0.30"
46
+ sphinxcontrib-blockdiag = "^3.0.0"
47
+ sphinxcontrib-seqdiag = "^3.0.0"
48
+ sphinx-codeautolink = "^0.17.0"
49
+ sphinx-exec-code = "^0.16"
50
+ sphinx-runpython = "^0.4.0"
51
+ sphinx-sitemap = "^2.6.0"
52
+ sphinx-last-updated-by-git = "^0.3.8"
53
+ sphinxext-opengraph = "^0.10.0"
54
+ sphinx-reredirects = "^1.0.0"
55
+ sphinx-favicon = "^1.0.1"
56
+ sphinxemoji = "^0.3.1"
57
+ sphinx-tippy = "^0.4.3"
58
+ sphinx-notfound-page = "^1.0.4"
59
+ # sphinx-toggleprompt = "^0.5.2" # Incompatible with Sphinx 8
60
+ sphinx-issues = "^5.0.0"
61
+ sphinx-git = "^11.0.0"
62
+ sphinx-changelog = "^1.6.0"
63
+ sphinx-prompt = "^1.9.0"
64
+ enum-tools = "^0.13.0"
65
+ sphinx-toolbox = "^3.8.1"
66
+ seed-intersphinx-mapping = "^1.2.2"
67
+ autodoc-pydantic = "^2.2.0"
68
+ sphinxcontrib-programoutput = "^0.17"
69
+
70
+ [build-system]
71
+ requires = ["poetry-core>=2.0.0,<3.0.0"]
72
+ build-backend = "poetry.core.masonry.api"