a2a_adapter-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,142 @@
+ """
+ CrewAI adapter for A2A Protocol.
+
+ This adapter enables CrewAI crews to be exposed as A2A-compliant agents
+ by translating A2A messages to crew inputs and crew outputs back to A2A.
+ """
+
+ import json
+ from typing import Any, Dict
+
+ from a2a.types import Message, MessageSendParams, Task, TextPart
+
+
+ class CrewAIAgentAdapter:
+     """
+     Adapter for integrating CrewAI crews as A2A agents.
+
+     This adapter handles the translation between A2A protocol messages
+     and CrewAI's crew execution model.
+     """
+
+     def __init__(
+         self,
+         crew: Any,  # Type: crewai.Crew (avoiding hard dependency)
+         inputs_key: str = "inputs",
+     ):
+         """
+         Initialize the CrewAI adapter.
+
+         Args:
+             crew: A CrewAI Crew instance to execute
+             inputs_key: The key name for passing inputs to the crew (default: "inputs")
+         """
+         self.crew = crew
+         self.inputs_key = inputs_key
+
+     async def handle(self, params: MessageSendParams) -> Message | Task:
+         """Handle a non-streaming A2A message request."""
+         framework_input = await self.to_framework(params)
+         framework_output = await self.call_framework(framework_input, params)
+         return await self.from_framework(framework_output, params)
+
+     async def to_framework(self, params: MessageSendParams) -> Dict[str, Any]:
+         """
+         Convert A2A message parameters to CrewAI crew inputs.
+
+         Extracts the user's message and prepares it as input for the crew.
+
+         Args:
+             params: A2A message parameters
+
+         Returns:
+             Dictionary with crew input data
+         """
+         # Extract text from the last user message
+         user_message = ""
+         if params.messages:
+             last_message = params.messages[-1]
+             if hasattr(last_message, "content"):
+                 if isinstance(last_message.content, list):
+                     # Extract text from content blocks
+                     text_parts = [
+                         item.text
+                         for item in last_message.content
+                         if hasattr(item, "text")
+                     ]
+                     user_message = " ".join(text_parts)
+                 elif isinstance(last_message.content, str):
+                     user_message = last_message.content
+
+         # Build crew inputs
+         # CrewAI typically expects a dict with task-specific keys
+         return {
+             self.inputs_key: user_message,
+             "message": user_message,
+             "session_id": getattr(params, "session_id", None),
+         }
+
+     async def call_framework(
+         self, framework_input: Dict[str, Any], params: MessageSendParams
+     ) -> Any:
+         """
+         Execute the CrewAI crew with the provided inputs.
+
+         Args:
+             framework_input: Input dictionary for the crew
+             params: Original A2A parameters (for context)
+
+         Returns:
+             CrewAI crew execution output
+
+         Raises:
+             Exception: If crew execution fails
+         """
+         # Prefer CrewAI's native async execution (kickoff_async) when available
+         if hasattr(self.crew, "kickoff_async"):
+             return await self.crew.kickoff_async(inputs=framework_input)
+
+         # Fallback for older CrewAI versions without async support: run the
+         # synchronous kickoff in a thread pool so the event loop stays responsive
+         import asyncio
+
+         loop = asyncio.get_running_loop()
+         return await loop.run_in_executor(
+             None, self.crew.kickoff, framework_input
+         )
+
+     async def from_framework(
+         self, framework_output: Any, params: MessageSendParams
+     ) -> Message | Task:
+         """
+         Convert CrewAI crew output to A2A Message.
+
+         Args:
+             framework_output: Output from crew execution
+             params: Original A2A parameters
+
+         Returns:
+             A2A Message with the crew's response
+         """
+         # CrewAI output can be various types (string, dict, CrewOutput object)
+         if hasattr(framework_output, "raw"):
+             # CrewOutput object
+             response_text = str(framework_output.raw)
+         elif isinstance(framework_output, dict):
+             # Dictionary output - serialize as JSON
+             response_text = json.dumps(framework_output, indent=2)
+         else:
+             # String or other type - convert to string
+             response_text = str(framework_output)
+
+         return Message(
+             role="assistant",
+             content=[TextPart(type="text", text=response_text)],
+         )
+
+     def supports_streaming(self) -> bool:
+         """Check if this adapter supports streaming responses."""
+         return False
+
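For orientation, here is a minimal sketch of how the adapter above would typically be wired up. It is not part of the packaged code: the import path and the crew definition are assumptions for illustration, and it presumes the optional crewai dependency is installed.

from crewai import Agent, Crew, Task  # assumes crewai is installed

from a2a_adapter import CrewAIAgentAdapter  # import path assumed; the diff does not name the module

# A one-task crew; CrewAI interpolates {inputs} from the dict passed to kickoff.
responder = Agent(role="Responder", goal="Answer the user's question", backstory="A concise assistant.")
answer_task = Task(description="Answer this request: {inputs}", expected_output="A short answer", agent=responder)
crew = Crew(agents=[responder], tasks=[answer_task])

adapter = CrewAIAgentAdapter(crew=crew, inputs_key="inputs")

async def respond(params):
    # params is the a2a.types.MessageSendParams handed over by the A2A server layer;
    # handle() runs to_framework -> call_framework -> from_framework.
    return await adapter.handle(params)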
@@ -0,0 +1,171 @@
+ """
+ LangChain adapter for A2A Protocol.
+
+ This adapter enables LangChain runnables (chains, agents) to be exposed
+ as A2A-compliant agents with support for both streaming and non-streaming modes.
+ """
+
+ import json
+ from typing import Any, AsyncIterator, Dict
+
+ from a2a.types import Message, MessageSendParams, Task, TextPart
+
+
+ class LangChainAgentAdapter:
+     """
+     Adapter for integrating LangChain runnables as A2A agents.
+
+     This adapter works with any LangChain Runnable (chains, agents, etc.)
+     and supports both streaming and non-streaming execution modes.
+     """
+
+     def __init__(
+         self,
+         runnable: Any,  # Type: Runnable (avoiding hard dependency)
+         input_key: str = "input",
+         output_key: str | None = None,
+     ):
+         """
+         Initialize the LangChain adapter.
+
+         Args:
+             runnable: A LangChain Runnable instance (chain, agent, etc.)
+             input_key: The key name for passing input to the runnable (default: "input")
+             output_key: Optional key to extract from runnable output. If None, uses the entire output.
+         """
+         self.runnable = runnable
+         self.input_key = input_key
+         self.output_key = output_key
+
+     async def handle(self, params: MessageSendParams) -> Message | Task:
+         """Handle a non-streaming A2A message request."""
+         framework_input = await self.to_framework(params)
+         framework_output = await self.call_framework(framework_input, params)
+         return await self.from_framework(framework_output, params)
+
+     async def handle_stream(
+         self, params: MessageSendParams
+     ) -> AsyncIterator[Dict[str, Any]]:
+         """
+         Handle a streaming A2A message request.
+
+         Yields Server-Sent Events compatible dictionaries with streaming chunks.
+         """
+         framework_input = await self.to_framework(params)
+
+         # Stream from LangChain runnable
+         async for chunk in self.runnable.astream(framework_input):
+             # Extract text from chunk
+             if hasattr(chunk, "content"):
+                 text = chunk.content
+             elif isinstance(chunk, dict):
+                 text = chunk.get(self.output_key or "output", str(chunk))
+             else:
+                 text = str(chunk)
+
+             # Yield SSE-compatible event
+             if text:
+                 yield {
+                     "event": "message",
+                     "data": json.dumps({
+                         "type": "content",
+                         "content": text,
+                     }),
+                 }
+
+         # Send completion event
+         yield {
+             "event": "done",
+             "data": json.dumps({"status": "completed"}),
+         }
+
+     def supports_streaming(self) -> bool:
+         """Check if the runnable supports streaming."""
+         return hasattr(self.runnable, "astream")
+
+     async def to_framework(self, params: MessageSendParams) -> Dict[str, Any]:
+         """
+         Convert A2A message parameters to LangChain runnable input.
+
+         Args:
+             params: A2A message parameters
+
+         Returns:
+             Dictionary with runnable input data
+         """
+         # Extract text from the last user message
+         user_message = ""
+         if params.messages:
+             last_message = params.messages[-1]
+             if hasattr(last_message, "content"):
+                 if isinstance(last_message.content, list):
+                     # Extract text from content blocks
+                     text_parts = [
+                         item.text
+                         for item in last_message.content
+                         if hasattr(item, "text")
+                     ]
+                     user_message = " ".join(text_parts)
+                 elif isinstance(last_message.content, str):
+                     user_message = last_message.content
+
+         # Build runnable input
+         return {
+             self.input_key: user_message,
+         }
+
+     async def call_framework(
+         self, framework_input: Dict[str, Any], params: MessageSendParams
+     ) -> Any:
+         """
+         Execute the LangChain runnable with the provided input.
+
+         Args:
+             framework_input: Input dictionary for the runnable
+             params: Original A2A parameters (for context)
+
+         Returns:
+             Runnable execution output
+
+         Raises:
+             Exception: If runnable execution fails
+         """
+         result = await self.runnable.ainvoke(framework_input)
+         return result
+
+     async def from_framework(
+         self, framework_output: Any, params: MessageSendParams
+     ) -> Message | Task:
+         """
+         Convert LangChain runnable output to A2A Message.
+
+         Args:
+             framework_output: Output from runnable execution
+             params: Original A2A parameters
+
+         Returns:
+             A2A Message with the runnable's response
+         """
+         # Extract output based on type
+         if hasattr(framework_output, "content"):
+             # AIMessage or similar
+             response_text = framework_output.content
+         elif isinstance(framework_output, dict):
+             # Dictionary output - extract using output_key or serialize
+             if self.output_key and self.output_key in framework_output:
+                 response_text = str(framework_output[self.output_key])
+             else:
+                 response_text = json.dumps(framework_output, indent=2)
+         else:
+             # String or other type - convert to string
+             response_text = str(framework_output)
+
+         return Message(
+             role="assistant",
+             content=[TextPart(type="text", text=response_text)],
+         )
+
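And a corresponding sketch for the LangChain adapter (again a reviewer's illustration, not part of the packaged code, with the import path assumed): any Runnable exposing ainvoke/astream can be wrapped, and handle_stream yields SSE-style event dicts.

from langchain_core.runnables import RunnableLambda  # assumes langchain-core is installed

from a2a_adapter import LangChainAgentAdapter  # import path assumed; the diff does not name the module

# Any Runnable works; RunnableLambda keeps the example self-contained.
echo = RunnableLambda(lambda data: {"output": f"You said: {data['input']}"})

adapter = LangChainAgentAdapter(runnable=echo, input_key="input", output_key="output")

async def respond(params):
    # params is the a2a.types.MessageSendParams handed over by the A2A server layer.
    if adapter.supports_streaming():
        # handle_stream yields {"event": ..., "data": ...} dicts ready for SSE transport.
        return [event async for event in adapter.handle_stream(params)]
    return await adapter.handle(params)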