a2a-adapter 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- a2a_adapter/__init__.py +42 -0
- a2a_adapter/adapter.py +185 -0
- a2a_adapter/client.py +236 -0
- a2a_adapter/integrations/__init__.py +33 -0
- a2a_adapter/integrations/callable.py +172 -0
- a2a_adapter/integrations/crewai.py +142 -0
- a2a_adapter/integrations/langchain.py +171 -0
- a2a_adapter/integrations/n8n.py +787 -0
- a2a_adapter/loader.py +131 -0
- a2a_adapter-0.1.0.dist-info/METADATA +604 -0
- a2a_adapter-0.1.0.dist-info/RECORD +14 -0
- a2a_adapter-0.1.0.dist-info/WHEEL +5 -0
- a2a_adapter-0.1.0.dist-info/licenses/LICENSE +201 -0
- a2a_adapter-0.1.0.dist-info/top_level.txt +1 -0

a2a_adapter-0.1.0.dist-info/METADATA
@@ -0,0 +1,604 @@
Metadata-Version: 2.4
Name: a2a-adapter
Version: 0.1.0
Summary: A2A Protocol Adapter SDK for integrating various agent frameworks
Author-email: HYBRO AI <info@hybro.ai>
License: MIT
Project-URL: Homepage, https://github.com/hybro-ai/a2a-adapter
Project-URL: Documentation, https://github.com/hybro-ai/a2a-adapter#readme
Project-URL: Repository, https://github.com/hybro-ai/a2a-adapter
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: a2a>=0.44.0
Requires-Dist: a2a-sdk[http-server]>=0.3.0
Requires-Dist: uvicorn>=0.27.0
Requires-Dist: httpx>=0.26.0
Requires-Dist: pydantic>=2.0.0
Provides-Extra: n8n
Provides-Extra: crewai
Requires-Dist: crewai>=0.1.0; extra == "crewai"
Provides-Extra: langchain
Requires-Dist: langchain>=0.1.0; extra == "langchain"
Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
Provides-Extra: langgraph
Requires-Dist: langgraph>=0.0.1; extra == "langgraph"
Provides-Extra: all
Requires-Dist: crewai>=0.1.0; extra == "all"
Requires-Dist: langchain>=0.1.0; extra == "all"
Requires-Dist: langchain-core>=0.1.0; extra == "all"
Requires-Dist: langgraph>=0.0.1; extra == "all"
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
Requires-Dist: black>=23.0.0; extra == "dev"
Requires-Dist: ruff>=0.1.0; extra == "dev"
Requires-Dist: mypy>=1.0.0; extra == "dev"
Dynamic: license-file

# A2A Adapter

[](https://opensource.org/licenses/MIT)
[](https://www.python.org/downloads/)

**Open Source A2A Protocol Adapter SDK for Different Agent Frameworks**

A Python SDK that enables seamless integration of various agent frameworks (n8n, CrewAI, LangChain, etc.) with the [A2A (Agent-to-Agent) Protocol](https://github.com/a2a-protocol/a2a-protocol). Build interoperable AI agent systems that can communicate across different platforms and frameworks.

## Features

✨ **Framework Agnostic**: Integrate n8n workflows, CrewAI crews, LangChain chains, or custom agents
🔌 **Simple API**: 3-line setup to expose any agent as an A2A-compliant service
🌊 **Streaming Support**: Built-in streaming for LangChain and custom adapters
🎯 **Type Safe**: Leverages the official A2A SDK types
🔧 **Extensible**: Easy to add custom adapters for new frameworks
📦 **Minimal Dependencies**: Optional dependencies per framework

## Architecture

```
┌─────────────────┐
│   A2A Caller    │  (Other A2A Agents)
└────────┬────────┘
         │ A2A Protocol (HTTP + JSON-RPC 2.0)
         ▼
┌─────────────────┐
│   A2A Adapter   │  (This SDK)
│   - N8n         │
│   - CrewAI      │
│   - LangChain   │
│   - Custom      │
└────────┬────────┘
         │
         ▼
┌─────────────────┐
│   Your Agent    │  (n8n workflow / CrewAI crew / Chain)
└─────────────────┘
```

**Single-Agent Design**: Each server hosts exactly one agent. Multi-agent orchestration is handled externally via the A2A protocol or orchestration frameworks like LangGraph.

See [ARCHITECTURE.md](ARCHITECTURE.md) for detailed design documentation.

## Installation

### Basic Installation

```bash
pip install a2a-adapter
```

### With Framework Support

```bash
# For n8n (HTTP webhooks, no extra dependencies needed)
pip install a2a-adapter

# For CrewAI
pip install a2a-adapter[crewai]

# For LangChain
pip install a2a-adapter[langchain]

# For LangGraph
pip install a2a-adapter[langgraph]

# Install all frameworks
pip install a2a-adapter[all]

# For development
pip install a2a-adapter[dev]
```

## Quick Start

### 🚀 Easy Start with Examples

For the fastest way to get started, use the included examples:

```bash
# Clone and setup
git clone <repository>
cd a2a-adapter
python -m venv .venv
source .venv/bin/activate  # On Windows: .venv\Scripts\activate
pip install -e .

# Start an agent
./run_agent.sh n8n        # N8n workflow agent
./run_agent.sh crewai     # CrewAI agent
./run_agent.sh langchain  # LangChain agent

# Stop with Ctrl+C
```

**Environment Variables:**

```bash
export N8N_WEBHOOK_URL="https://your-n8n.com/webhook/your-workflow"
```

### 📝 Manual Setup

### 1. N8n Workflow Agent

Expose an n8n workflow as an A2A agent:

```python
import asyncio
from a2a_adapter import load_a2a_agent, serve_agent
from a2a.types import AgentCard

async def main():
    # Load adapter
    adapter = await load_a2a_agent({
        "adapter": "n8n",
        "webhook_url": "https://n8n.example.com/webhook/math",
        "timeout": 30
    })

    # Define agent card
    card = AgentCard(
        name="Math Agent",
        description="Performs mathematical calculations via n8n"
    )

    # Start server
    serve_agent(agent_card=card, adapter=adapter, port=9000)

asyncio.run(main())
```

### 2. CrewAI Agent

Expose a CrewAI crew as an A2A agent:

```python
import asyncio
from crewai import Crew, Agent, Task
from a2a_adapter import load_a2a_agent, serve_agent
from a2a.types import AgentCard

# Create your crew
crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True
)

async def main():
    adapter = await load_a2a_agent({
        "adapter": "crewai",
        "crew": crew
    })

    card = AgentCard(
        name="Research Crew",
        description="Multi-agent research team"
    )

    serve_agent(agent_card=card, adapter=adapter, port=8001)

asyncio.run(main())
```

### 3. LangChain Agent (with Streaming)

Expose a LangChain chain with streaming support:

```python
import asyncio
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from a2a_adapter import load_a2a_agent, serve_agent
from a2a.types import AgentCard

# Create chain
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant."),
    ("user", "{input}")
])
llm = ChatOpenAI(model="gpt-4o-mini", streaming=True)
chain = prompt | llm

async def main():
    adapter = await load_a2a_agent({
        "adapter": "langchain",
        "runnable": chain,
        "input_key": "input"
    })

    card = AgentCard(
        name="Chat Agent",
        description="Streaming chat agent powered by GPT-4o-mini"
    )

    serve_agent(agent_card=card, adapter=adapter, port=8002)

asyncio.run(main())
```

### 4. Custom Adapter

Create a custom agent with any async function:

```python
import asyncio
from a2a_adapter import load_a2a_agent, serve_agent
from a2a.types import AgentCard

async def my_agent_function(inputs: dict) -> str:
    """Your custom agent logic."""
    message = inputs["message"]
    return f"Echo: {message}"

async def main():
    adapter = await load_a2a_agent({
        "adapter": "callable",
        "callable": my_agent_function
    })

    card = AgentCard(
        name="Echo Agent",
        description="Simple echo agent"
    )

    serve_agent(agent_card=card, adapter=adapter, port=8003)

asyncio.run(main())
```

## Advanced Usage

### Custom Adapter Class

For full control, subclass `BaseAgentAdapter`:

```python
from a2a_adapter import BaseAgentAdapter, serve_agent
from a2a.types import Message, MessageSendParams, TextPart

class SentimentAnalyzer(BaseAgentAdapter):
    async def to_framework(self, params: MessageSendParams):
        # Extract user message
        text = params.messages[-1].content[0].text
        return {"text": text}

    async def call_framework(self, framework_input, params):
        # Your analysis logic (analyze_sentiment is your own function)
        sentiment = analyze_sentiment(framework_input["text"])
        return {"sentiment": sentiment}

    async def from_framework(self, framework_output, params):
        # Convert to A2A Message
        return Message(
            role="assistant",
            content=[TextPart(
                type="text",
                text=f"Sentiment: {framework_output['sentiment']}"
            )]
        )

# Use your custom adapter
adapter = SentimentAnalyzer()
serve_agent(agent_card=card, adapter=adapter, port=8004)
```

### Streaming Custom Adapter

Implement `handle_stream()` for streaming responses:

```python
import json

class StreamingAdapter(BaseAgentAdapter):
    async def handle_stream(self, params: MessageSendParams):
        """Yield SSE-compatible events."""
        for chunk in generate_response_chunks():  # your own chunk source
            yield {
                "event": "message",
                "data": json.dumps({"type": "content", "content": chunk})
            }

        yield {
            "event": "done",
            "data": json.dumps({"status": "completed"})
        }

    def supports_streaming(self):
        return True
```

### Using with LangGraph

Integrate A2A agents into LangGraph workflows:

```python
from langgraph.graph import StateGraph
from a2a.client import A2AClient
from a2a.types import MessageSendParams

# Create A2A client
math_agent = A2AClient(base_url="http://localhost:9000")

# Use in a LangGraph node
async def call_math_agent(state):
    response = await math_agent.send_message(
        MessageSendParams(messages=[...])
    )
    return {"result": response}

# Add to graph
graph = StateGraph(...)
graph.add_node("math", call_math_agent)
```

See [examples/06_langgraph_single_agent.py](examples/06_langgraph_single_agent.py) for a complete example.

## Configuration

### N8n Adapter

```python
{
    "adapter": "n8n",
    "webhook_url": "https://n8n.example.com/webhook/agent",  # Required
    "timeout": 30,                                           # Optional, default: 30
    "headers": {                                             # Optional
        "Authorization": "Bearer token"
    }
}
```

### CrewAI Adapter

```python
{
    "adapter": "crewai",
    "crew": crew_instance,   # Required: CrewAI Crew object
    "inputs_key": "inputs"   # Optional, default: "inputs"
}
```

### LangChain Adapter

```python
{
    "adapter": "langchain",
    "runnable": chain,       # Required: Any Runnable
    "input_key": "input",    # Optional, default: "input"
    "output_key": None       # Optional, extracts specific key from output
}
```

### Callable Adapter

```python
{
    "adapter": "callable",
    "callable": async_function,    # Required: async function
    "supports_streaming": False    # Optional, default: False
}
```

## Examples

The `examples/` directory contains complete working examples:

- **01_single_n8n_agent.py** - N8n workflow agent
- **02_single_crewai_agent.py** - CrewAI multi-agent crew
- **03_single_langchain_agent.py** - LangChain streaming agent
- **04_single_agent_client.py** - A2A client for testing
- **05_custom_adapter.py** - Custom adapter implementations
- **06_langgraph_single_agent.py** - LangGraph + A2A integration

Run any example:

```bash
# Start an agent server
python examples/01_single_n8n_agent.py

# In another terminal, test with client
python examples/04_single_agent_client.py
```

## Testing

```bash
# Install dev dependencies
pip install a2a-adapter[dev]

# Run unit tests
pytest tests/unit/

# Run integration tests (requires framework dependencies)
pytest tests/integration/

# Run all tests
pytest
```

## API Reference

### Core Functions

#### `load_a2a_agent(config: Dict[str, Any]) -> BaseAgentAdapter`

Factory function to create an adapter from configuration.

**Args:**

- `config`: Dictionary with `"adapter"` key and framework-specific options

**Returns:**

- Configured `BaseAgentAdapter` instance

**Raises:**

- `ValueError`: If adapter type is unknown or required config is missing
- `ImportError`: If required framework package is not installed

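A minimal sketch of handling these errors when loading an adapter for an optional framework (the CrewAI config here is just a placeholder; `crew` would be built as in the Quick Start):

```python
from a2a_adapter import load_a2a_agent

async def build_adapter(crew):
    try:
        return await load_a2a_agent({"adapter": "crewai", "crew": crew})
    except ImportError:
        # The optional framework package is not installed
        raise SystemExit("Install the extra first: pip install a2a-adapter[crewai]")
    except ValueError as err:
        # Unknown adapter type or missing required config
        raise SystemExit(f"Invalid adapter config: {err}")
```
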
#### `build_agent_app(agent_card: AgentCard, adapter: BaseAgentAdapter) -> ASGIApp`

Build an ASGI application for serving an A2A agent.

**Args:**

- `agent_card`: A2A AgentCard describing the agent
- `adapter`: Adapter instance

**Returns:**

- ASGI application ready to be served

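As a rough sketch (assuming `build_agent_app` is importable from `a2a_adapter` like the other core functions, and that `card`/`adapter` are built as in the Quick Start), the returned app can be handed straight to uvicorn instead of calling `serve_agent()`:

```python
import uvicorn
from a2a_adapter import build_agent_app

app = build_agent_app(agent_card=card, adapter=adapter)  # card/adapter from the Quick Start
uvicorn.run(app, host="0.0.0.0", port=9000)
```
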
#### `serve_agent(agent_card, adapter, host="0.0.0.0", port=9000, **kwargs)`

Start serving an A2A agent (convenience function).

**Args:**

- `agent_card`: A2A AgentCard
- `adapter`: Adapter instance
- `host`: Host address (default: "0.0.0.0")
- `port`: Port number (default: 9000)
- `**kwargs`: Additional arguments passed to `uvicorn.run()`

### BaseAgentAdapter

Abstract base class for all adapters.

#### Methods

##### `async def handle(params: MessageSendParams) -> Message | Task`

Handle a non-streaming A2A message request.

##### `async def handle_stream(params: MessageSendParams) -> AsyncIterator[Dict]`

Handle a streaming A2A message request. Override in subclasses that support streaming.

##### `@abstractmethod async def to_framework(params: MessageSendParams) -> Any`

Convert A2A message parameters to framework-specific input.

##### `@abstractmethod async def call_framework(framework_input: Any, params: MessageSendParams) -> Any`

Execute the underlying agent framework.

##### `@abstractmethod async def from_framework(framework_output: Any, params: MessageSendParams) -> Message | Task`

Convert framework output to A2A Message or Task.

##### `def supports_streaming() -> bool`

Check if this adapter supports streaming responses.

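Conceptually, a non-streaming request flows through the three abstract methods in order: `to_framework` → `call_framework` → `from_framework`. A simplified sketch of that pipeline (not the SDK's actual `handle()` implementation, which may add validation and error handling):

```python
from a2a_adapter import BaseAgentAdapter
from a2a.types import MessageSendParams

async def run_adapter(adapter: BaseAgentAdapter, params: MessageSendParams):
    framework_input = await adapter.to_framework(params)                      # A2A params -> framework input
    framework_output = await adapter.call_framework(framework_input, params)  # run the underlying agent
    return await adapter.from_framework(framework_output, params)             # framework output -> Message/Task
```
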
## Framework Support

| Framework           | Adapter                 | Streaming   | Status     |
| ------------------- | ----------------------- | ----------- | ---------- |
| **n8n**             | `N8nAgentAdapter`       | ❌          | ✅ Stable  |
| **CrewAI**          | `CrewAIAgentAdapter`    | ❌          | ✅ Stable  |
| **LangChain**       | `LangChainAgentAdapter` | ✅          | ✅ Stable  |
| **Custom Function** | `CallableAgentAdapter`  | ✅ Optional | ✅ Stable  |
| **AutoGen**         | -                       | -           | 🔜 Planned |
| **Semantic Kernel** | -                       | -           | 🔜 Planned |

## Contributing

We welcome contributions! To add support for a new framework:

1. Create `a2a_adapter/integrations/{framework}.py`
2. Implement a class extending `BaseAgentAdapter` (see the sketch after this list)
3. Add it to the `loader.py` factory function
4. Update `integrations/__init__.py`
5. Add the optional dependency to `pyproject.toml`
6. Create an example in `examples/`
7. Add tests in `tests/`
8. Update this README

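For step 2, a minimal skeleton might look like the following (the framework, module, and class names are illustrative, not part of the SDK):

```python
# a2a_adapter/integrations/myframework.py (illustrative module)
from a2a_adapter import BaseAgentAdapter
from a2a.types import Message, MessageSendParams

class MyFrameworkAgentAdapter(BaseAgentAdapter):
    """Adapter skeleton for a hypothetical framework."""

    async def to_framework(self, params: MessageSendParams):
        # Map the incoming A2A message to your framework's input format.
        ...

    async def call_framework(self, framework_input, params: MessageSendParams):
        # Invoke your framework and return its raw output.
        ...

    async def from_framework(self, framework_output, params: MessageSendParams) -> Message:
        # Wrap the framework output in an A2A Message (or Task).
        ...

    def supports_streaming(self) -> bool:
        return False
```
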
See [ARCHITECTURE.md](ARCHITECTURE.md) for detailed guidance.

## Roadmap

- [x] Core adapter abstraction
- [x] N8n adapter
- [x] CrewAI adapter
- [x] LangChain adapter with streaming
- [x] Callable adapter
- [x] Comprehensive examples
- [ ] Task support (async execution pattern)
- [ ] Artifact support (file uploads/downloads)
- [ ] AutoGen adapter
- [ ] Semantic Kernel adapter
- [ ] Haystack adapter
- [ ] Middleware system (logging, metrics, rate limiting)
- [ ] Configuration validation with Pydantic
- [ ] Docker images for quick deployment

## FAQ

### Q: Can I run multiple agents in one process?

**A:** This SDK is designed for one agent per process. For multi-agent systems, run multiple A2A servers and orchestrate them externally using the A2A protocol or tools like LangGraph.

### Q: Does this support the latest A2A protocol version?

**A:** Yes. The adapters are built on the official A2A SDK, which stays up to date with protocol changes.

### Q: Can I use this with my custom agent framework?

**A:** Absolutely! Use the `CallableAgentAdapter` for simple cases or subclass `BaseAgentAdapter` for full control.

### Q: What about authentication and rate limiting?

**A:** These concerns are handled at the infrastructure level (reverse proxy, API gateway) or by the official A2A SDK. Adapters focus solely on framework integration.

### Q: How do I debug adapter issues?

**A:** Set `log_level="debug"` in `serve_agent()` and check the logs. Each adapter logs framework calls and responses.

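For example (with `card` and `adapter` built as in the Quick Start), the keyword is forwarded to `uvicorn.run()`:

```python
serve_agent(agent_card=card, adapter=adapter, port=9000, log_level="debug")
```
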
## License

MIT License - see [LICENSE](LICENSE) file for details.

## Credits

Built with ❤️ by [HYBRO AI](https://hybro.ai)

Powered by the [A2A Protocol](https://github.com/a2a-protocol/a2a-protocol)

## Support

- 📚 [Documentation](https://github.com/hybro-ai/a2a-adapter)
- 🐛 [Issue Tracker](https://github.com/hybro-ai/a2a-adapter/issues)
- 💬 [Discussions](https://github.com/hybro-ai/a2a-adapter/discussions)

---

**Star ⭐ this repo if you find it useful!**

a2a_adapter-0.1.0.dist-info/RECORD
@@ -0,0 +1,14 @@
a2a_adapter/__init__.py,sha256=Qin3SI0AlS2VZYwJalOZAX2btECOXDgt2nuHDWsAjAg,1252
a2a_adapter/adapter.py,sha256=xCHmNNi5C_71A8OlIf_UZ1gUcvAFAIMrq8vGV_tYuTM,6098
a2a_adapter/client.py,sha256=TLkYBGQitZdrw_FY_ov9EjHicJX0dqqXaQ-dwfkUv2I,7397
a2a_adapter/loader.py,sha256=QEhozlS0dOu4qXoWpqglV1J2x2FN-CtWq4_ueq5Pr7o,4692
a2a_adapter/integrations/__init__.py,sha256=vDYdcveV_U-nr39fvSnpIif2QgDF_AS6CPlElpBDrDs,1099
a2a_adapter/integrations/callable.py,sha256=GsKZO5TyJVMOAS21cr7JNVKqjg7Z2CSc9Q9vhzgkapY,5917
a2a_adapter/integrations/crewai.py,sha256=S4mF1mJeTAfMdif-su1E7jUjWgxAeFKlgz9WXlZEKN0,4813
a2a_adapter/integrations/langchain.py,sha256=raaaJA_FS3qBsURbCbBq2bcfsRMdKgpVbKEUN3rWA4s,5866
a2a_adapter/integrations/n8n.py,sha256=d9RqAS7y4eJZPWBaSOHBnT-wqFvTuJtmbE4rsvu5V2o,30171
a2a_adapter-0.1.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
a2a_adapter-0.1.0.dist-info/METADATA,sha256=GV6R5E3sp4b9b0szp53n1UT7EREDAgI9OmZL8oHPLSc,16527
a2a_adapter-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
a2a_adapter-0.1.0.dist-info/top_level.txt,sha256=b1O1dTJ2AoPEB2x-r5IHEsS2x1fczOzTrpR2DgF3LgE,12
a2a_adapter-0.1.0.dist-info/RECORD,,
|