smarta2a 0.2.4__tar.gz → 0.2.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {smarta2a-0.2.4 → smarta2a-0.2.5}/PKG-INFO +1 -1
  2. {smarta2a-0.2.4 → smarta2a-0.2.5}/pyproject.toml +1 -1
  3. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/agent/a2a_agent.py +16 -8
  4. {smarta2a-0.2.4/smarta2a/client → smarta2a-0.2.5/smarta2a/archive}/smart_mcp_client.py +4 -1
  5. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/client/a2a_client.py +2 -2
  6. {smarta2a-0.2.4/smarta2a/archive → smarta2a-0.2.5/smarta2a/client}/mcp_client.py +12 -4
  7. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/client/tools_manager.py +15 -12
  8. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/examples/echo_server/main.py +5 -3
  9. smarta2a-0.2.5/smarta2a/examples/openai_agent/__init__.py +0 -0
  10. smarta2a-0.2.5/smarta2a/examples/openai_agent/main.py +32 -0
  11. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/model_providers/openai_provider.py +56 -49
  12. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/server.py +8 -6
  13. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/utils/types.py +1 -1
  14. smarta2a-0.2.4/examples/__init__.py +0 -3
  15. smarta2a-0.2.4/examples/agents/__init__.py +0 -3
  16. {smarta2a-0.2.4 → smarta2a-0.2.5}/.gitignore +0 -0
  17. {smarta2a-0.2.4 → smarta2a-0.2.5}/LICENSE +0 -0
  18. {smarta2a-0.2.4 → smarta2a-0.2.5}/README.md +0 -0
  19. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/blog/announcements.md +0 -0
  20. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/getting-started/index.md +0 -0
  21. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/getting-started/installation.md +0 -0
  22. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/getting-started/quickstart.md +0 -0
  23. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/index.md +0 -0
  24. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/tutorials/example1.md +0 -0
  25. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/docs/tutorials/example2.md +0 -0
  26. {smarta2a-0.2.4 → smarta2a-0.2.5}/documentation/smarta2a_docs/mkdocs.yml +0 -0
  27. {smarta2a-0.2.4 → smarta2a-0.2.5}/requirements.txt +0 -0
  28. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/__init__.py +0 -0
  29. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/agent/a2a_mcp_server.py +0 -0
  30. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/client/__init__.py +0 -0
  31. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/examples/__init__.py +0 -0
  32. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/examples/echo_server/__init__.py +0 -0
  33. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/examples/echo_server/curl.txt +0 -0
  34. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/history_update_strategies/__init__.py +0 -0
  35. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/history_update_strategies/append_strategy.py +0 -0
  36. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/history_update_strategies/history_update_strategy.py +0 -0
  37. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/model_providers/__init__.py +0 -0
  38. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/model_providers/base_llm_provider.py +0 -0
  39. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/__init__.py +0 -0
  40. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/handler_registry.py +0 -0
  41. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/state_manager.py +0 -0
  42. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/subscription_service.py +0 -0
  43. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/server/task_service.py +0 -0
  44. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/state_stores/__init__.py +0 -0
  45. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/state_stores/base_state_store.py +0 -0
  46. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/state_stores/inmemory_state_store.py +0 -0
  47. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/utils/__init__.py +0 -0
  48. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/utils/prompt_helpers.py +0 -0
  49. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/utils/task_builder.py +0 -0
  50. {smarta2a-0.2.4 → smarta2a-0.2.5}/smarta2a/utils/task_request_builder.py +0 -0
  51. {smarta2a-0.2.4 → smarta2a-0.2.5}/tests/__init__.py +0 -0
  52. {smarta2a-0.2.4 → smarta2a-0.2.5}/tests/test_server.py +0 -0
  53. {smarta2a-0.2.4 → smarta2a-0.2.5}/tests/test_server_history.py +0 -0
  54. {smarta2a-0.2.4 → smarta2a-0.2.5}/tests/test_task_request_builder.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: smarta2a
3
- Version: 0.2.4
3
+ Version: 0.2.5
4
4
  Summary: A simple Python framework (built on top of FastAPI) for creating Agents following Google's Agent2Agent protocol
5
5
  Project-URL: Homepage, https://github.com/siddharthsma/smarta2a
6
6
  Project-URL: Bug Tracker, https://github.com/siddharthsma/smarta2a/issues
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "smarta2a"
7
- version = "0.2.4"
7
+ version = "0.2.5"
8
8
  authors = [
9
9
  { name = "Siddharth Ambegaonkar", email = "siddharthsma@gmail.com" },
10
10
  ]
@@ -5,7 +5,9 @@
5
5
  from smarta2a.server import SmartA2A
6
6
  from smarta2a.model_providers.base_llm_provider import BaseLLMProvider
7
7
  from smarta2a.history_update_strategies.history_update_strategy import HistoryUpdateStrategy
8
+ from smarta2a.history_update_strategies.append_strategy import AppendStrategy
8
9
  from smarta2a.state_stores.base_state_store import BaseStateStore
10
+ from smarta2a.state_stores.inmemory_state_store import InMemoryStateStore
9
11
  from smarta2a.utils.types import StateData, SendTaskRequest
10
12
 
11
13
  class A2AAgent:
@@ -13,26 +15,32 @@ class A2AAgent:
13
15
  self,
14
16
  name: str,
15
17
  model_provider: BaseLLMProvider,
16
- history_update_strategy: HistoryUpdateStrategy,
17
- state_storage: BaseStateStore,
18
+ history_update_strategy: HistoryUpdateStrategy = None,
19
+ state_store: BaseStateStore = None,
18
20
  ):
19
21
  self.model_provider = model_provider
22
+ self.history_update_strategy = history_update_strategy or AppendStrategy()
23
+ self.state_store = state_store or InMemoryStateStore()
20
24
  self.app = SmartA2A(
21
25
  name=name,
22
- history_update_strategy=history_update_strategy,
23
- state_storage=state_storage
26
+ history_update_strategy=self.history_update_strategy,
27
+ state_store=self.state_store
24
28
  )
25
29
  self.__register_handlers()
26
30
 
27
31
  def __register_handlers(self):
32
+ @self.app.on_event("startup")
33
+ async def on_startup():
34
+ await self.model_provider.load()
35
+
28
36
  @self.app.on_send_task()
29
37
  async def on_send_task(request: SendTaskRequest, state: StateData):
30
- response = self.model_provider.generate(state.history)
38
+ response = await self.model_provider.generate(state.history)
31
39
  return response
40
+
41
+ def get_app(self):
42
+ return self.app
32
43
 
33
- def start(self, **kwargs):
34
- self.app.configure(**kwargs)
35
- self.app.run()
36
44
 
37
45
 
38
46
 
@@ -1,4 +1,4 @@
1
- from mcp.client import Client
1
+ from mcp.client import ClientSession, sse_client, stdio_client, StdioServerParameters
2
2
  from typing import Optional, Dict, Any
3
3
 
4
4
  class SmartMCPClient:
@@ -7,6 +7,9 @@ class SmartMCPClient:
7
7
  Initialize with the server URL. Headers are provided per request, not globally.
8
8
  """
9
9
  self.base_url = base_url
10
+ self.session = None
11
+ self.exit_stack = AsyncExitStack()
12
+ self._connect_to_server()
10
13
 
11
14
  async def list_tools(self, session_id: Optional[str] = None) -> Any:
12
15
  """
@@ -181,7 +181,7 @@ class A2AClient:
181
181
  raise A2AClientHTTPError(400, str(e)) from e
182
182
 
183
183
 
184
- def list_tools(self) -> list[dict[str, Any]]:
184
+ async def list_tools(self) -> list[dict[str, Any]]:
185
185
  """Return metadata for all available tools."""
186
186
  tools = []
187
187
  tool_names = [
@@ -224,7 +224,7 @@ class A2AClient:
224
224
  tools.append({
225
225
  'name': name,
226
226
  'description': description,
227
- 'input_schema': schema
227
+ 'inputSchema': schema
228
228
  })
229
229
  return tools
230
230
 
@@ -1,14 +1,22 @@
1
1
  # Library imports
2
2
  import re
3
+ from typing import Dict, Any
3
4
  from contextlib import AsyncExitStack
4
- from mcp.client import ClientSession, sse_client, stdio_client, StdioServerParameters
5
+ from mcp import ClientSession, StdioServerParameters
6
+ from mcp.client.stdio import stdio_client
7
+ from mcp.client.sse import sse_client
5
8
 
6
9
 
7
10
  class MCPClient:
8
11
  def __init__(self):
9
12
  self.session = None
10
13
  self.exit_stack = AsyncExitStack()
11
- self._connect_to_server()
14
+
15
+ @classmethod
16
+ async def create(cls, server_path_or_url: str):
17
+ client = cls()
18
+ await client._connect_to_server(server_path_or_url)
19
+ return client
12
20
 
13
21
  async def _connect_to_sse_server(self, server_url: str):
14
22
  """Connect to an SSE MCP server."""
@@ -72,9 +80,9 @@ class MCPClient:
72
80
  response = await self.session.list_tools()
73
81
  return response.tools
74
82
 
75
- async def call_tool(self, tool_name: str, **tool_args):
83
+ async def call_tool(self, tool_name: str, tool_args: Dict[str, Any]):
76
84
  """Call a tool."""
77
- response = await self.session.call_tool(tool_name, **tool_args)
85
+ response = await self.session.call_tool(tool_name, tool_args)
78
86
  return response.content
79
87
 
80
88
  async def cleanup(self):
@@ -3,7 +3,7 @@ import json
3
3
  from typing import List, Dict, Any, Union, Literal
4
4
 
5
5
  # Local imports
6
- from smarta2a.client.smart_mcp_client import SmartMCPClient
6
+ from smarta2a.client.mcp_client import MCPClient
7
7
  from smarta2a.client.a2a_client import A2AClient
8
8
  from smarta2a.utils.types import AgentCard
9
9
 
@@ -14,21 +14,23 @@ class ToolsManager:
14
14
  """
15
15
  def __init__(self):
16
16
  self.tools_list: List[Any] = []
17
- self.clients: Dict[str, Union[SmartMCPClient, A2AClient]] = {}
17
+ self.clients: Dict[str, Union[MCPClient, A2AClient]] = {}
18
18
 
19
- def load_mcp_tools(self, urls_or_paths: List[str]) -> None:
19
+ async def load_mcp_tools(self, urls_or_paths: List[str]) -> None:
20
20
  for url in urls_or_paths:
21
- mcp_client = SmartMCPClient(url)
22
- for tool in mcp_client.list_tools():
21
+ mcp_client = await MCPClient.create(url)
22
+ tools = await mcp_client.list_tools()
23
+ for tool in tools:
23
24
  self.tools_list.append(tool)
24
25
  self.clients[tool.name] = mcp_client
25
26
 
26
- def load_a2a_tools(self, agent_cards: List[AgentCard]) -> None:
27
+ async def load_a2a_tools(self, agent_cards: List[AgentCard]) -> None:
27
28
  for agent_card in agent_cards:
28
29
  a2a_client = A2AClient(agent_card)
29
- for tool in a2a_client.list_tools():
30
+ tools = await a2a_client.list_tools()
31
+ for tool in tools:
30
32
  self.tools_list.append(tool)
31
- self.clients[tool.name] = a2a_client
33
+ self.clients[tool.name] = a2a_client
32
34
 
33
35
  def get_tools(self) -> List[Any]:
34
36
  return self.tools_list
@@ -37,15 +39,16 @@ class ToolsManager:
37
39
  def describe_tools(self, client_type: Literal["mcp", "a2a"]) -> str:
38
40
  lines = []
39
41
  for tool in self.tools_list:
40
- if client_type == "mcp" and isinstance(tool, SmartMCPClient):
41
- schema = json.dumps(tool.input_schema, indent=2)
42
+ schema = json.dumps(tool.inputSchema, indent=2) # Fix: use inputSchema
43
+ if client_type == "mcp":
42
44
  lines.append(
43
45
  f"- **{tool.name}**: {tool.description}\n Parameters schema:\n ```json\n{schema}\n```"
44
46
  )
45
- elif client_type == "a2a" and isinstance(tool, A2AClient):
47
+ elif client_type == "a2a":
46
48
  lines.append(
47
- f"- **{tool.name}**: {tool.description} Parameters schema:\n ```json\n{schema}\n```"
49
+ f"- **{tool.name}**: {tool.description}\n Parameters schema:\n ```json\n{schema}\n```"
48
50
  )
51
+
49
52
  return "\n".join(lines)
50
53
 
51
54
  def get_client(self, tool_name: str) -> Any:
@@ -1,10 +1,12 @@
1
1
  from smarta2a.server import SmartA2A
2
2
  from smarta2a.utils.types import A2AResponse, TaskStatus, TaskState, TextPart, FileContent, FilePart
3
+ from smarta2a.state_stores.inmemory_state_store import InMemoryStateStore
3
4
 
4
- app = SmartA2A("EchoServer")
5
+ state_store = InMemoryStateStore()
6
+ app = SmartA2A("EchoServer", state_store=state_store)
5
7
 
6
8
  @app.on_send_task()
7
- def handle_task(request):
9
+ async def handle_task(request, state):
8
10
  """Echo the input text back as a completed task"""
9
11
  input_text = request.content[0].text
10
12
  #return f"Response to task: {input_text}"
@@ -14,7 +16,7 @@ def handle_task(request):
14
16
  )
15
17
 
16
18
  @app.on_send_subscribe_task()
17
- async def handle_subscribe_task(request):
19
+ async def handle_subscribe_task(request, state):
18
20
  """Subscribe to the task"""
19
21
  input_text = request.content[0].text
20
22
  yield f"First response to the task: {input_text}"
@@ -0,0 +1,32 @@
1
+ # Imports
2
+ from dotenv import load_dotenv
3
+ import os
4
+ import uvicorn
5
+ import asyncio
6
+ from smarta2a.agent.a2a_agent import A2AAgent
7
+ from smarta2a.model_providers.openai_provider import OpenAIProvider
8
+
9
+
10
+
11
+ # Load environment variables from the .env file
12
+ load_dotenv()
13
+
14
+ # Fetch the value using os.getenv
15
+ api_key = os.getenv("OPENAI_API_KEY")
16
+
17
+
18
+ openai_provider = OpenAIProvider(
19
+ api_key=api_key,
20
+ model="gpt-4o-mini",
21
+ mcp_server_urls_or_paths=["/Users/apple/Desktop/Code/weather/weather.py"],
22
+ )
23
+
24
+ # Create the agent
25
+ agent = A2AAgent(
26
+ name="openai_agent",
27
+ model_provider=openai_provider,
28
+ )
29
+
30
+ # Entry point
31
+ if __name__ == "__main__":
32
+ uvicorn.run(agent.get_app(), host="0.0.0.0", port=8000)
@@ -28,13 +28,17 @@ class OpenAIProvider(BaseLLMProvider):
28
28
  self.supported_media_types = [
29
29
  "image/png", "image/jpeg", "image/gif", "image/webp"
30
30
  ]
31
- # Initialize ToolsManager and load MCP tools if given
31
+ # Initialize ToolsManager
32
32
  self.tools_manager = ToolsManager()
33
- if mcp_server_urls_or_paths:
34
- self.tools_manager.load_mcp_tools(mcp_server_urls_or_paths)
35
33
 
36
- if agent_cards:
37
- self.tools_manager.load_a2a_tools(agent_cards)
34
+
35
+ async def load(self):
36
+ if self.mcp_server_urls_or_paths:
37
+ await self.tools_manager.load_mcp_tools(self.mcp_server_urls_or_paths)
38
+
39
+ if self.agent_cards:
40
+ await self.tools_manager.load_a2a_tools(self.agent_cards)
41
+
38
42
 
39
43
  def _build_system_prompt(self) -> str:
40
44
  """Get the system prompt with tool descriptions."""
@@ -80,12 +84,11 @@ class OpenAIProvider(BaseLLMProvider):
80
84
  """Convert messages to OpenAI format with system prompt"""
81
85
  openai_messages = []
82
86
 
83
- # Add system prompt if provided
84
- if self.system_prompt:
85
- openai_messages.append({
86
- "role": "system",
87
- "content": self._build_system_prompt()
88
- })
87
+ # Add system prompt
88
+ openai_messages.append({
89
+ "role": "system",
90
+ "content": self._build_system_prompt()
91
+ })
89
92
 
90
93
  # Process user-provided messages
91
94
  for msg in messages:
@@ -124,22 +127,22 @@ class OpenAIProvider(BaseLLMProvider):
124
127
  "function": {
125
128
  "name": tool.name,
126
129
  "description": tool.description,
127
- "parameters": tool.input_schema
130
+ "parameters": tool.inputSchema
128
131
  }
129
132
  })
130
133
  return openai_tools
131
134
 
132
135
 
133
- async def generate(self, messages: List[Message], **kwargs) -> str:
136
+ async def generate(self, messages: List[Dict[str, Any]], **kwargs) -> str:
134
137
  """
135
138
  Generate a complete response, invoking tools as needed.
136
139
  """
137
- # Convert incoming messages with dynamic system prompt
140
+ # Ensure messages are Message objects
141
+ messages = [msg if isinstance(msg, Message) else Message(**msg) for msg in messages]
138
142
  converted_messages = self._convert_messages(messages)
139
143
  max_iterations = 10
140
144
 
141
- for _ in range(max_iterations):
142
- # Call OpenAI chat completion with available tools
145
+ for iteration in range(max_iterations):
143
146
  response = await self.client.chat.completions.create(
144
147
  model=self.model,
145
148
  messages=converted_messages,
@@ -148,47 +151,51 @@ class OpenAIProvider(BaseLLMProvider):
148
151
  )
149
152
  message = response.choices[0].message
150
153
 
151
- # If the assistant didn't call a tool, return its content
152
- if not hasattr(message, 'tool_calls') or not message.tool_calls:
154
+ # Detect and extract the tool/function call
155
+ if getattr(message, 'function_call', None):
156
+ name = message.function_call.name
157
+ args_raw = message.function_call.arguments
158
+ elif getattr(message, 'tool_calls', None):
159
+ tc = message.tool_calls[0]
160
+ name = tc.function.name
161
+ args_raw = tc.function.arguments
162
+ else:
153
163
  return message.content
154
164
 
155
- # Append assistant's tool call to the conversation
165
+ # Append the assistant's intent
156
166
  converted_messages.append({
157
167
  "role": "assistant",
158
- "content": message.content,
159
- "tool_calls": [
160
- {"id": tc.id,
161
- "type": "function",
162
- "function": {"name": tc.function.name,
163
- "arguments": tc.function.arguments}
164
- }
165
- for tc in message.tool_calls
166
- ]
168
+ "content": None,
169
+ "function_call": {"name": name, "arguments": args_raw}
167
170
  })
168
171
 
169
- # Process each tool call sequentially
170
- for tc in message.tool_calls:
171
- tool_name = tc.function.name
172
- # Parse arguments
173
- try:
174
- tool_args = json.loads(tc.function.arguments)
175
- except json.JSONDecodeError:
176
- tool_args = {}
172
+ # Parse arguments safely
173
+ try:
174
+ args = json.loads(args_raw or '{}')
175
+ except json.JSONDecodeError:
176
+ args = {}
177
177
 
178
- # Execute the tool via the ToolsManager
179
- try:
180
- result = await self.tools_manager.call_tool(tool_name, tool_args)
181
- result_content = result.content
182
- except Exception as e:
183
- result_content = f"Error executing {tool_name}: {e}"
178
+ # Call the tool manager with name and parsed args
179
+ try:
180
+ tool_result = await self.tools_manager.call_tool(name, args)
181
+ except Exception as e:
182
+ tool_result = {"content": f"Error calling {name}: {e}"}
183
+
184
+ # Extract content
185
+ if hasattr(tool_result, 'content'):
186
+ result_content = tool_result.content
187
+ elif isinstance(tool_result, dict) and 'content' in tool_result:
188
+ result_content = tool_result['content']
189
+ else:
190
+ result_content = str(tool_result)
191
+
192
+ # Append the function/tool's response
193
+ converted_messages.append({
194
+ "role": "function",
195
+ "name": name,
196
+ "content": result_content
197
+ })
184
198
 
185
- # Append the tool response into the conversation
186
- converted_messages.append({
187
- "role": "tool",
188
- "content": result_content,
189
- "tool_call_id": tc.id
190
- })
191
- # If max iterations reached without a final response
192
199
  raise RuntimeError("Max tool iteration depth reached in generate().")
193
200
 
194
201
 
@@ -83,6 +83,9 @@ class SmartA2A:
83
83
  # Add this method to delegate ASGI calls
84
84
  async def __call__(self, scope, receive, send):
85
85
  return await self.app(scope, receive, send)
86
+
87
+ def on_event(self, event_name: str):
88
+ return self.app.on_event(event_name)
86
89
 
87
90
  def on_send_task(self):
88
91
  def decorator(func: Callable[[SendTaskRequest, Optional[StateData]], Any]) -> Callable:
@@ -150,9 +153,9 @@ class SmartA2A:
150
153
  if method == "tasks/send":
151
154
  state_data = self.state_mgr.init_or_get(params.get("sessionId"), params.get("message"), params.get("metadata") or {})
152
155
  if state_store:
153
- return self._handle_send_task(request, state_data)
156
+ return await self._handle_send_task(request, state_data)
154
157
  else:
155
- return self._handle_send_task(request)
158
+ return await self._handle_send_task(request)
156
159
  elif method == "tasks/sendSubscribe":
157
160
  state_data = self.state_mgr.init_or_get(params.get("sessionId"), params.get("message"), params.get("metadata") or {})
158
161
  if state_store:
@@ -176,7 +179,7 @@ class SmartA2A:
176
179
  return JSONRPCResponse(id=request.id, error=err).model_dump()
177
180
 
178
181
 
179
- def _handle_send_task(self, request_data: JSONRPCRequest, state_data: Optional[StateData] = None) -> SendTaskResponse:
182
+ async def _handle_send_task(self, request_data: JSONRPCRequest, state_data: Optional[StateData] = None) -> SendTaskResponse:
180
183
  try:
181
184
  # Validate request format
182
185
  request = SendTaskRequest.model_validate(request_data.model_dump())
@@ -203,9 +206,9 @@ class SmartA2A:
203
206
  try:
204
207
 
205
208
  if state_data:
206
- raw_result = handler(request, state_data)
209
+ raw_result = await handler(request, state_data)
207
210
  else:
208
- raw_result = handler(request)
211
+ raw_result = await handler(request)
209
212
 
210
213
  # Handle direct SendTaskResponse returns
211
214
  if isinstance(raw_result, SendTaskResponse):
@@ -513,7 +516,6 @@ class SmartA2A:
513
516
  task_id=request.params.id,
514
517
  metadata=getattr(raw_result, "metadata", {}) or {}
515
518
  )
516
- print(task)
517
519
 
518
520
  # Final validation and packaging
519
521
  return self._finalize_cancel_response(request, task)
@@ -57,7 +57,7 @@ Part = Annotated[Union[TextPart, FilePart, DataPart], Field(discriminator="type"
57
57
 
58
58
 
59
59
  class Message(BaseModel):
60
- role: Literal["user", "agent", "system"] # Added system role for system messages
60
+ role: Literal["user", "agent", "system", "tool"] # Added system role for system messages
61
61
  parts: List[Part]
62
62
  metadata: dict[str, Any] | None = None
63
63
 
@@ -1,3 +0,0 @@
1
- """
2
- Example implementations using the SmartA2A framework.
3
- """
@@ -1,3 +0,0 @@
1
- """
2
- Example agent implementations.
3
- """
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes