smarta2a 0.2.4__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {smarta2a-0.2.4 → smarta2a-0.3.0}/PKG-INFO +1 -1
  2. {smarta2a-0.2.4 → smarta2a-0.3.0}/pyproject.toml +1 -1
  3. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/agent/a2a_agent.py +16 -8
  4. {smarta2a-0.2.4/smarta2a/client → smarta2a-0.3.0/smarta2a/archive}/smart_mcp_client.py +4 -1
  5. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/client/a2a_client.py +86 -52
  6. {smarta2a-0.2.4/smarta2a/archive → smarta2a-0.3.0/smarta2a/client}/mcp_client.py +12 -4
  7. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/client/tools_manager.py +17 -13
  8. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/examples/echo_server/main.py +5 -3
  9. smarta2a-0.3.0/smarta2a/examples/openai_delegator_agent/__init__.py +0 -0
  10. smarta2a-0.3.0/smarta2a/examples/openai_delegator_agent/main.py +41 -0
  11. smarta2a-0.3.0/smarta2a/examples/openai_weather_agent/__init__.py +0 -0
  12. smarta2a-0.3.0/smarta2a/examples/openai_weather_agent/main.py +32 -0
  13. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/model_providers/openai_provider.py +55 -48
  14. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/server.py +8 -6
  15. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/types.py +6 -1
  16. smarta2a-0.2.4/examples/__init__.py +0 -3
  17. smarta2a-0.2.4/examples/agents/__init__.py +0 -3
  18. {smarta2a-0.2.4 → smarta2a-0.3.0}/.gitignore +0 -0
  19. {smarta2a-0.2.4 → smarta2a-0.3.0}/LICENSE +0 -0
  20. {smarta2a-0.2.4 → smarta2a-0.3.0}/README.md +0 -0
  21. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/blog/announcements.md +0 -0
  22. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/getting-started/index.md +0 -0
  23. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/getting-started/installation.md +0 -0
  24. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/getting-started/quickstart.md +0 -0
  25. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/index.md +0 -0
  26. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/tutorials/example1.md +0 -0
  27. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/docs/tutorials/example2.md +0 -0
  28. {smarta2a-0.2.4 → smarta2a-0.3.0}/documentation/smarta2a_docs/mkdocs.yml +0 -0
  29. {smarta2a-0.2.4 → smarta2a-0.3.0}/requirements.txt +0 -0
  30. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/__init__.py +0 -0
  31. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/agent/a2a_mcp_server.py +0 -0
  32. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/client/__init__.py +0 -0
  33. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/examples/__init__.py +0 -0
  34. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/examples/echo_server/__init__.py +0 -0
  35. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/examples/echo_server/curl.txt +0 -0
  36. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/history_update_strategies/__init__.py +0 -0
  37. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/history_update_strategies/append_strategy.py +0 -0
  38. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/history_update_strategies/history_update_strategy.py +0 -0
  39. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/model_providers/__init__.py +0 -0
  40. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/model_providers/base_llm_provider.py +0 -0
  41. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/__init__.py +0 -0
  42. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/handler_registry.py +0 -0
  43. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/state_manager.py +0 -0
  44. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/subscription_service.py +0 -0
  45. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/task_service.py +0 -0
  46. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/state_stores/__init__.py +0 -0
  47. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/state_stores/base_state_store.py +0 -0
  48. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/state_stores/inmemory_state_store.py +0 -0
  49. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/__init__.py +0 -0
  50. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/prompt_helpers.py +0 -0
  51. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/task_builder.py +0 -0
  52. {smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/task_request_builder.py +0 -0
  53. {smarta2a-0.2.4 → smarta2a-0.3.0}/tests/__init__.py +0 -0
  54. {smarta2a-0.2.4 → smarta2a-0.3.0}/tests/test_server.py +0 -0
  55. {smarta2a-0.2.4 → smarta2a-0.3.0}/tests/test_server_history.py +0 -0
  56. {smarta2a-0.2.4 → smarta2a-0.3.0}/tests/test_task_request_builder.py +0 -0
{smarta2a-0.2.4 → smarta2a-0.3.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: smarta2a
-Version: 0.2.4
+Version: 0.3.0
 Summary: A simple Python framework (built on top of FastAPI) for creating Agents following Google's Agent2Agent protocol
 Project-URL: Homepage, https://github.com/siddharthsma/smarta2a
 Project-URL: Bug Tracker, https://github.com/siddharthsma/smarta2a/issues

{smarta2a-0.2.4 → smarta2a-0.3.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "smarta2a"
-version = "0.2.4"
+version = "0.3.0"
 authors = [
   { name = "Siddharth Ambegaonkar", email = "siddharthsma@gmail.com" },
 ]

{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/agent/a2a_agent.py
@@ -5,7 +5,9 @@
 from smarta2a.server import SmartA2A
 from smarta2a.model_providers.base_llm_provider import BaseLLMProvider
 from smarta2a.history_update_strategies.history_update_strategy import HistoryUpdateStrategy
+from smarta2a.history_update_strategies.append_strategy import AppendStrategy
 from smarta2a.state_stores.base_state_store import BaseStateStore
+from smarta2a.state_stores.inmemory_state_store import InMemoryStateStore
 from smarta2a.utils.types import StateData, SendTaskRequest

 class A2AAgent:
@@ -13,26 +15,32 @@ class A2AAgent:
         self,
         name: str,
         model_provider: BaseLLMProvider,
-        history_update_strategy: HistoryUpdateStrategy,
-        state_storage: BaseStateStore,
+        history_update_strategy: HistoryUpdateStrategy = None,
+        state_store: BaseStateStore = None,
     ):
         self.model_provider = model_provider
+        self.history_update_strategy = history_update_strategy or AppendStrategy()
+        self.state_store = state_store or InMemoryStateStore()
         self.app = SmartA2A(
             name=name,
-            history_update_strategy=history_update_strategy,
-            state_storage=state_storage
+            history_update_strategy=self.history_update_strategy,
+            state_store=self.state_store
         )
         self.__register_handlers()

     def __register_handlers(self):
+        @self.app.on_event("startup")
+        async def on_startup():
+            await self.model_provider.load()
+
         @self.app.on_send_task()
         async def on_send_task(request: SendTaskRequest, state: StateData):
-            response = self.model_provider.generate(state.history)
+            response = await self.model_provider.generate(state.history)
             return response
+
+    def get_app(self):
+        return self.app

-    def start(self, **kwargs):
-        self.app.configure(**kwargs)
-        self.app.run()

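In 0.3.0 the agent no longer owns its event loop: `start()` is gone, the strategy and store arguments are optional, and the provider is loaded in a FastAPI startup hook. A minimal usage sketch mirroring the bundled examples (model name and port are illustrative):

```python
# Sketch: running an A2AAgent under uvicorn with the 0.3.0 defaults.
import os
import uvicorn
from smarta2a.agent.a2a_agent import A2AAgent
from smarta2a.model_providers.openai_provider import OpenAIProvider

provider = OpenAIProvider(api_key=os.getenv("OPENAI_API_KEY"), model="gpt-4o-mini")

agent = A2AAgent(
    name="demo_agent",
    model_provider=provider,
    # history_update_strategy and state_store may now be omitted;
    # they default to AppendStrategy() and InMemoryStateStore().
)

if __name__ == "__main__":
    # start()/run() is gone in 0.3.0; hand the ASGI app to uvicorn instead.
    uvicorn.run(agent.get_app(), host="0.0.0.0", port=8000)
```
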
{smarta2a-0.2.4/smarta2a/client → smarta2a-0.3.0/smarta2a/archive}/smart_mcp_client.py
@@ -1,4 +1,4 @@
-from mcp.client import Client
+from mcp.client import ClientSession, sse_client, stdio_client, StdioServerParameters
 from typing import Optional, Dict, Any

 class SmartMCPClient:
@@ -7,6 +7,9 @@ class SmartMCPClient:
         Initialize with the server URL. Headers are provided per request, not globally.
         """
         self.base_url = base_url
+        self.session = None
+        self.exit_stack = AsyncExitStack()
+        self._connect_to_server()

     async def list_tools(self, session_id: Optional[str] = None) -> Any:
         """

{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/client/a2a_client.py
@@ -4,7 +4,8 @@ import httpx
 import json
 from httpx_sse import connect_sse
 from inspect import signature, Parameter, iscoroutinefunction
-from pydantic import create_model, Field, BaseModel
+from pydantic import create_model, Field, BaseModel, ValidationError
+from typing import Optional, Union

 # Local imports
 from smarta2a.utils.types import (
@@ -181,87 +182,120 @@ class A2AClient:
             raise A2AClientHTTPError(400, str(e)) from e


-    def list_tools(self) -> list[dict[str, Any]]:
-        """Return metadata for all available tools."""
+    async def list_tools(self) -> list[dict[str, Any]]:
+        """Return metadata for all available tools with minimal inputSchema."""
         tools = []
-        tool_names = [
-            'send'
-        ]
+        tool_names = ['send']  # add other tool names here
         for name in tool_names:
             method = getattr(self, name)
             doc = method.__doc__ or ""
             description = doc.strip().split('\n')[0] if doc else ""

-            # Generate input schema
             sig = signature(method)
-            parameters = sig.parameters
-
-            fields = {}
-            required = []
-            for param_name, param in parameters.items():
+            properties: dict[str, Any] = {}
+            required: list[str] = []
+            for param_name, param in sig.parameters.items():
                 if param_name == 'self':
                     continue
-                annotation = param.annotation
-                if annotation is Parameter.empty:
-                    annotation = Any
-                # Handle Literal types
-                if get_origin(annotation) is Literal:
-                    enum_values = get_args(annotation)
-                    annotation = Literal.__getitem__(enum_values)
-                # Handle default
+
+                ann = param.annotation
                 default = param.default
+
+                # Handle Literal types
+                if get_origin(ann) is Literal:
+                    enum_vals = list(get_args(ann))
+                    schema_field: dict[str, Any] = {
+                        "title": param_name.replace('_', ' ').title(),
+                        "enum": enum_vals
+                    }
+                    # For Literals we'd typically not mark required if there's a default
+                else:
+                    # map basic Python types to JSON Schema types
+                    type_map = {
+                        str: "string",
+                        int: "integer",
+                        float: "number",
+                        bool: "boolean",
+                        dict: "object",
+                        list: "array",
+                        Any: None
+                    }
+                    json_type = type_map.get(ann, None)
+                    schema_field = {"title": param_name.replace('_', ' ').title()}
+                    if json_type:
+                        schema_field["type"] = json_type
+
+                # default vs required
                 if default is Parameter.empty:
                     required.append(param_name)
-                    field = Field(...)
+                    # no default key
                 else:
-                    field = Field(default=default)
-                fields[param_name] = (annotation, field)
-
-            # Create dynamic Pydantic model
-            model = create_model(f"{name}_Input", **fields)
-            schema = model.schema()
+                    schema_field["default"] = default
+
+                properties[param_name] = schema_field

+            input_schema = {
+                "title": f"{name}_Arguments",
+                "type": "object",
+                "properties": properties,
+                "required": required,
+            }
+
             tools.append({
-                'name': name,
-                'description': description,
-                'input_schema': schema
+                "name": name,
+                "description": description,
+                "inputSchema": input_schema
             })
+
         return tools

     async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any:
         """Call a tool by name with validated arguments."""
+        # 1) lookup
         if not hasattr(self, tool_name):
             raise ValueError(f"Tool {tool_name} not found")
         method = getattr(self, tool_name)
-
-        # Validate arguments using the same schema as list_tools
+
+        # 2) build a minimal pydantic model for validation
         sig = signature(method)
-        parameters = sig.parameters
-
-        fields = {}
-        for param_name, param in parameters.items():
+        model_fields: dict[str, tuple] = {}
+
+        for param_name, param in sig.parameters.items():
             if param_name == 'self':
                 continue
-            annotation = param.annotation
-            if annotation is Parameter.empty:
-                annotation = Any
-            # Handle Literal
-            if get_origin(annotation) is Literal:
-                enum_values = get_args(annotation)
-                annotation = Literal.__getitem__(enum_values)
+
+            # annotation
+            ann = param.annotation
+            if ann is Parameter.empty:
+                ann = Any
+
+            # default
             default = param.default
             if default is Parameter.empty:
-                fields[param_name] = (annotation, Field(...))
+                # required field
+                model_fields[param_name] = (ann, Field(...))
             else:
-                fields[param_name] = (annotation, Field(default=default))
-
-        # Create validation model
-        model = create_model(f"{tool_name}_ValidationModel", **fields)
-        validated_args = model(**arguments).dict()
-
-        # Call the method
+                # optional field: if default is None, widen annotation
+                if default is None and get_origin(ann) is not Union:
+                    ann = Optional[ann]
+                model_fields[param_name] = (ann, Field(default=default))
+
+        ValidationModel = create_model(
+            f"{tool_name}_ValidationModel",
+            **model_fields
+        )
+
+        # 3) validate (will raise ValidationError on bad args)
+        try:
+            validated = ValidationModel(**arguments)
+        except ValidationError as e:
+            # re-raise or wrap as you like
+            raise ValueError(f"Invalid arguments for tool {tool_name}: {e}") from e
+
+        validated_args = validated.dict()
+
+        # 4) call
         if iscoroutinefunction(method):
             return await method(**validated_args)
         else:
-            # Note: Synchronous methods (like subscribe) will block the event loop
             return method(**validated_args)

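The rewritten `list_tools()` hand-builds a minimal JSON Schema from each method's signature instead of round-tripping through `create_model(...).schema()`, keeping pydantic only for argument validation in `call_tool()`. A standalone sketch of that signature-to-schema technique (the `schema_for` helper and `send` stub here are illustrative, not smarta2a API):

```python
# Sketch: derive a minimal JSON-Schema-like dict from a function signature.
from inspect import signature, Parameter
from typing import Any, Literal, get_args, get_origin

def schema_for(func) -> dict[str, Any]:
    type_map = {str: "string", int: "integer", float: "number",
                bool: "boolean", dict: "object", list: "array"}
    properties: dict[str, Any] = {}
    required: list[str] = []
    for name, param in signature(func).parameters.items():
        if name == "self":
            continue
        ann = param.annotation
        if get_origin(ann) is Literal:
            field: dict[str, Any] = {"enum": list(get_args(ann))}
        else:
            field = {}
            if ann in type_map:
                field["type"] = type_map[ann]
        if param.default is Parameter.empty:
            required.append(name)          # no default -> required
        else:
            field["default"] = param.default
        properties[name] = field
    return {"type": "object", "properties": properties, "required": required}

def send(to: str, message: str, priority: int = 0): ...

print(schema_for(send))
# {'type': 'object', 'properties': {'to': {'type': 'string'}, 'message': {'type':
#  'string'}, 'priority': {'type': 'integer', 'default': 0}}, 'required': ['to', 'message']}
```
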
{smarta2a-0.2.4/smarta2a/archive → smarta2a-0.3.0/smarta2a/client}/mcp_client.py
@@ -1,14 +1,22 @@
 # Library imports
 import re
+from typing import Dict, Any
 from contextlib import AsyncExitStack
-from mcp.client import ClientSession, sse_client, stdio_client, StdioServerParameters
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.stdio import stdio_client
+from mcp.client.sse import sse_client


 class MCPClient:
     def __init__(self):
         self.session = None
         self.exit_stack = AsyncExitStack()
-        self._connect_to_server()
+
+    @classmethod
+    async def create(cls, server_path_or_url: str):
+        client = cls()
+        await client._connect_to_server(server_path_or_url)
+        return client

     async def _connect_to_sse_server(self, server_url: str):
         """Connect to an SSE MCP server."""
@@ -72,9 +80,9 @@ class MCPClient:
         response = await self.session.list_tools()
         return response.tools

-    async def call_tool(self, tool_name: str, **tool_args):
+    async def call_tool(self, tool_name: str, tool_args: Dict[str, Any]):
         """Call a tool."""
-        response = await self.session.call_tool(tool_name, **tool_args)
+        response = await self.session.call_tool(tool_name, tool_args)
         return response.content

     async def cleanup(self):

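Because `__init__` cannot `await`, the connection step moves out of the constructor and into an async `create()` factory classmethod. A usage sketch, assuming a local stdio MCP server script and a `get_forecast` tool (both illustrative):

```python
# Sketch: driving the 0.3.0 MCPClient via its async factory.
import asyncio
from smarta2a.client.mcp_client import MCPClient

async def main():
    # create() connects before returning, replacing the sync __init__ hookup
    client = await MCPClient.create("./weather.py")  # illustrative server path
    tools = await client.list_tools()
    print([tool.name for tool in tools])
    # call_tool() now takes one dict of arguments instead of **kwargs
    result = await client.call_tool("get_forecast", {"latitude": 52.5, "longitude": 13.4})
    print(result)
    await client.cleanup()

asyncio.run(main())
```
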
{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/client/tools_manager.py
@@ -3,9 +3,9 @@ import json
 from typing import List, Dict, Any, Union, Literal

 # Local imports
-from smarta2a.client.smart_mcp_client import SmartMCPClient
+from smarta2a.client.mcp_client import MCPClient
 from smarta2a.client.a2a_client import A2AClient
-from smarta2a.utils.types import AgentCard
+from smarta2a.utils.types import AgentCard, Tool

 class ToolsManager:
     """
@@ -14,21 +14,24 @@ class ToolsManager:
     """
     def __init__(self):
         self.tools_list: List[Any] = []
-        self.clients: Dict[str, Union[SmartMCPClient, A2AClient]] = {}
+        self.clients: Dict[str, Union[MCPClient, A2AClient]] = {}

-    def load_mcp_tools(self, urls_or_paths: List[str]) -> None:
+    async def load_mcp_tools(self, urls_or_paths: List[str]) -> None:
         for url in urls_or_paths:
-            mcp_client = SmartMCPClient(url)
-            for tool in mcp_client.list_tools():
+            mcp_client = await MCPClient.create(url)
+            tools = await mcp_client.list_tools()
+            for tool in tools:
                 self.tools_list.append(tool)
                 self.clients[tool.name] = mcp_client

-    def load_a2a_tools(self, agent_cards: List[AgentCard]) -> None:
+    async def load_a2a_tools(self, agent_cards: List[AgentCard]) -> None:
         for agent_card in agent_cards:
             a2a_client = A2AClient(agent_card)
-            for tool in a2a_client.list_tools():
+            tools_list = await a2a_client.list_tools()
+            tools = [Tool(**tool_dict) for tool_dict in tools_list]
+            for tool in tools:
                 self.tools_list.append(tool)
-                self.clients[tool.name] = a2a_client
+                self.clients[tool.name] = a2a_client

     def get_tools(self) -> List[Any]:
         return self.tools_list
@@ -37,15 +40,16 @@ class ToolsManager:
     def describe_tools(self, client_type: Literal["mcp", "a2a"]) -> str:
         lines = []
         for tool in self.tools_list:
-            if client_type == "mcp" and isinstance(tool, SmartMCPClient):
-                schema = json.dumps(tool.input_schema, indent=2)
+            schema = json.dumps(tool.inputSchema, indent=2)  # Fix: use inputSchema
+            if client_type == "mcp":
                 lines.append(
                     f"- **{tool.name}**: {tool.description}\n Parameters schema:\n ```json\n{schema}\n```"
                 )
-            elif client_type == "a2a" and isinstance(tool, A2AClient):
+            elif client_type == "a2a":
                 lines.append(
-                    f"- **{tool.name}**: {tool.description} Parameters schema:\n ```json\n{schema}\n```"
+                    f"- **{tool.name}**: {tool.description}\n Parameters schema:\n ```json\n{schema}\n```"
                 )
+
         return "\n".join(lines)

     def get_client(self, tool_name: str) -> Any:

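Both loaders are now coroutines and must run inside an event loop; in 0.3.0 this normally happens via the provider's `load()` during the agent's startup event. A minimal manual sketch, assuming the MCP path is illustrative and the `AgentCard` fields follow the bundled examples:

```python
# Sketch: loading MCP and A2A tools by hand with the async loaders.
import asyncio
from smarta2a.client.tools_manager import ToolsManager
from smarta2a.utils.types import AgentCard, AgentCapabilities

card = AgentCard(
    name="weather_agent",
    description="Answers weather queries",
    version="0.1.0",
    url="http://localhost:8000",
    capabilities=AgentCapabilities(),
    skills=[],
)

async def main():
    manager = ToolsManager()
    await manager.load_mcp_tools(["./weather.py"])  # illustrative local MCP script
    await manager.load_a2a_tools([card])
    print(manager.describe_tools("mcp"))

asyncio.run(main())
```
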
{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/examples/echo_server/main.py
@@ -1,10 +1,12 @@
 from smarta2a.server import SmartA2A
 from smarta2a.utils.types import A2AResponse, TaskStatus, TaskState, TextPart, FileContent, FilePart
+from smarta2a.state_stores.inmemory_state_store import InMemoryStateStore

-app = SmartA2A("EchoServer")
+state_store = InMemoryStateStore()
+app = SmartA2A("EchoServer", state_store=state_store)

 @app.on_send_task()
-def handle_task(request):
+async def handle_task(request, state):
     """Echo the input text back as a completed task"""
     input_text = request.content[0].text
     #return f"Response to task: {input_text}"
@@ -14,7 +16,7 @@ def handle_task(request):
     )

 @app.on_send_subscribe_task()
-async def handle_subscribe_task(request):
+async def handle_subscribe_task(request, state):
     """Subscribe to the task"""
     input_text = request.content[0].text
     yield f"First response to the task: {input_text}"

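Handlers now take a second `state` argument, giving them access to the session's accumulated history. A sketch of what that enables, assuming `StateData` as defined in `smarta2a/utils/types.py` (the handler itself is an illustrative variant, not part of the example file):

```python
# Sketch: a handler that uses the new state argument.
@app.on_send_task()
async def handle_task_with_history(request, state):
    turns = len(state.history)  # StateData.history is a List[Message]
    input_text = request.content[0].text
    return f"Echo #{turns}: {input_text}"
```
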
smarta2a-0.3.0/smarta2a/examples/openai_delegator_agent/main.py
@@ -0,0 +1,41 @@
+# Imports
+from dotenv import load_dotenv
+import os
+import uvicorn
+from smarta2a.agent.a2a_agent import A2AAgent
+from smarta2a.model_providers.openai_provider import OpenAIProvider
+from smarta2a.utils.types import AgentCard, AgentCapabilities, AgentSkill
+
+
+# Load environment variables from the .env file
+load_dotenv()
+
+# Fetch the value using os.getenv
+api_key = os.getenv("OPENAI_API_KEY")
+
+weather_agent_card = AgentCard(
+    name="weather_agent",
+    description="A weather agent that can help with weather related queries",
+    version="0.1.0",
+    url="http://localhost:8000",
+    capabilities=AgentCapabilities(),
+    skills=[AgentSkill(id="weather_forecasting", name="Weather Forecasting", description="Can get weather forecast for a given latitude and longitude"),
+            AgentSkill(id="weather_alerts", name="Weather Alerts", description="Can get weather alerts for a US state")]
+)
+
+
+openai_provider = OpenAIProvider(
+    api_key=api_key,
+    model="gpt-4o-mini",
+    agent_cards=[weather_agent_card]
+)
+
+# Create the agent
+agent = A2AAgent(
+    name="openai_agent",
+    model_provider=openai_provider,
+)
+
+# Entry point
+if __name__ == "__main__":
+    uvicorn.run(agent.get_app(), host="0.0.0.0", port=8080)

smarta2a-0.3.0/smarta2a/examples/openai_weather_agent/main.py
@@ -0,0 +1,32 @@
+# Imports
+from dotenv import load_dotenv
+import os
+import uvicorn
+from smarta2a.agent.a2a_agent import A2AAgent
+from smarta2a.model_providers.openai_provider import OpenAIProvider
+
+
+
+# Load environment variables from the .env file
+load_dotenv()
+
+# Fetch the value using os.getenv
+api_key = os.getenv("OPENAI_API_KEY")
+
+
+openai_provider = OpenAIProvider(
+    api_key=api_key,
+    model="gpt-4o-mini",
+    base_system_prompt="You are a cheerful assistant that specialises in helping with weather related queries",
+    mcp_server_urls_or_paths=["/Users/apple/Desktop/Code/weather/weather.py"],
+)
+
+# Create the agent
+agent = A2AAgent(
+    name="openai_agent",
+    model_provider=openai_provider,
+)
+
+# Entry point
+if __name__ == "__main__":
+    uvicorn.run(agent.get_app(), host="0.0.0.0", port=8000)

{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/model_providers/openai_provider.py
@@ -28,13 +28,17 @@ class OpenAIProvider(BaseLLMProvider):
         self.supported_media_types = [
             "image/png", "image/jpeg", "image/gif", "image/webp"
         ]
-        # Initialize ToolsManager and load MCP tools if given
+        # Initialize ToolsManager
         self.tools_manager = ToolsManager()
-        if mcp_server_urls_or_paths:
-            self.tools_manager.load_mcp_tools(mcp_server_urls_or_paths)

-        if agent_cards:
-            self.tools_manager.load_a2a_tools(agent_cards)
+
+    async def load(self):
+        if self.mcp_server_urls_or_paths:
+            await self.tools_manager.load_mcp_tools(self.mcp_server_urls_or_paths)
+
+        if self.agent_cards:
+            await self.tools_manager.load_a2a_tools(self.agent_cards)
+

     def _build_system_prompt(self) -> str:
         """Get the system prompt with tool descriptions."""
@@ -80,12 +84,11 @@ class OpenAIProvider(BaseLLMProvider):
         """Convert messages to OpenAI format with system prompt"""
         openai_messages = []

-        # Add system prompt if provided
-        if self.system_prompt:
-            openai_messages.append({
-                "role": "system",
-                "content": self._build_system_prompt()
-            })
+        # Add system prompt
+        openai_messages.append({
+            "role": "system",
+            "content": self._build_system_prompt()
+        })

         # Process user-provided messages
         for msg in messages:
@@ -130,16 +133,16 @@ class OpenAIProvider(BaseLLMProvider):
         return openai_tools


-    async def generate(self, messages: List[Message], **kwargs) -> str:
+    async def generate(self, messages: List[Dict[str, Any]], **kwargs) -> str:
         """
         Generate a complete response, invoking tools as needed.
         """
-        # Convert incoming messages with dynamic system prompt
+        # Ensure messages are Message objects
+        messages = [msg if isinstance(msg, Message) else Message(**msg) for msg in messages]
         converted_messages = self._convert_messages(messages)
         max_iterations = 10

-        for _ in range(max_iterations):
-            # Call OpenAI chat completion with available tools
+        for iteration in range(max_iterations):
             response = await self.client.chat.completions.create(
                 model=self.model,
                 messages=converted_messages,
@@ -148,47 +151,51 @@ class OpenAIProvider(BaseLLMProvider):
             )
             message = response.choices[0].message

-            # If the assistant didn't call a tool, return its content
-            if not hasattr(message, 'tool_calls') or not message.tool_calls:
+            # Detect and extract the tool/function call
+            if getattr(message, 'function_call', None):
+                name = message.function_call.name
+                args_raw = message.function_call.arguments
+            elif getattr(message, 'tool_calls', None):
+                tc = message.tool_calls[0]
+                name = tc.function.name
+                args_raw = tc.function.arguments
+            else:
                 return message.content

-            # Append assistant's tool call to the conversation
+            # Append the assistant's intent
             converted_messages.append({
                 "role": "assistant",
-                "content": message.content,
-                "tool_calls": [
-                    {"id": tc.id,
-                     "type": "function",
-                     "function": {"name": tc.function.name,
-                                  "arguments": tc.function.arguments}
-                    }
-                    for tc in message.tool_calls
-                ]
+                "content": None,
+                "function_call": {"name": name, "arguments": args_raw}
             })

-            # Process each tool call sequentially
-            for tc in message.tool_calls:
-                tool_name = tc.function.name
-                # Parse arguments
-                try:
-                    tool_args = json.loads(tc.function.arguments)
-                except json.JSONDecodeError:
-                    tool_args = {}
+            # Parse arguments safely
+            try:
+                args = json.loads(args_raw or '{}')
+            except json.JSONDecodeError:
+                args = {}

-                # Execute the tool via the ToolsManager
-                try:
-                    result = await self.tools_manager.call_tool(tool_name, tool_args)
-                    result_content = result.content
-                except Exception as e:
-                    result_content = f"Error executing {tool_name}: {e}"
+            # Call the tool manager with name and parsed args
+            try:
+                tool_result = await self.tools_manager.call_tool(name, args)
+            except Exception as e:
+                tool_result = {"content": f"Error calling {name}: {e}"}
+
+            # Extract content
+            if hasattr(tool_result, 'content'):
+                result_content = tool_result.content
+            elif isinstance(tool_result, dict) and 'content' in tool_result:
+                result_content = tool_result['content']
+            else:
+                result_content = str(tool_result)
+
+            # Append the function/tool's response
+            converted_messages.append({
+                "role": "function",
+                "name": name,
+                "content": result_content
+            })

-                # Append the tool response into the conversation
-                converted_messages.append({
-                    "role": "tool",
-                    "content": result_content,
-                    "tool_call_id": tc.id
-                })
-        # If max iterations reached without a final response
         raise RuntimeError("Max tool iteration depth reached in generate().")

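With tool loading deferred to `load()`, `generate()` can be driven directly once the tools are in place; it now also coerces plain dicts into `Message` objects. A driving sketch reusing `openai_provider` from the examples above; the part shape assumes a standard A2A `TextPart` (`{"type": "text", "text": ...}`), which is an assumption here:

```python
# Sketch: calling generate() with a plain-dict message after load().
import asyncio

async def main():
    await openai_provider.load()  # normally awaited by A2AAgent's startup hook
    reply = await openai_provider.generate([
        {"role": "user", "parts": [{"type": "text", "text": "What's the weather in Berlin?"}]}
    ])
    print(reply)

asyncio.run(main())
```
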
{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/server/server.py
@@ -83,6 +83,9 @@ class SmartA2A:
     # Add this method to delegate ASGI calls
     async def __call__(self, scope, receive, send):
         return await self.app(scope, receive, send)
+
+    def on_event(self, event_name: str):
+        return self.app.on_event(event_name)

     def on_send_task(self):
         def decorator(func: Callable[[SendTaskRequest, Optional[StateData]], Any]) -> Callable:
@@ -150,9 +153,9 @@
         if method == "tasks/send":
             state_data = self.state_mgr.init_or_get(params.get("sessionId"), params.get("message"), params.get("metadata") or {})
             if state_store:
-                return self._handle_send_task(request, state_data)
+                return await self._handle_send_task(request, state_data)
             else:
-                return self._handle_send_task(request)
+                return await self._handle_send_task(request)
         elif method == "tasks/sendSubscribe":
             state_data = self.state_mgr.init_or_get(params.get("sessionId"), params.get("message"), params.get("metadata") or {})
             if state_store:
@@ -176,7 +179,7 @@
             return JSONRPCResponse(id=request.id, error=err).model_dump()


-    def _handle_send_task(self, request_data: JSONRPCRequest, state_data: Optional[StateData] = None) -> SendTaskResponse:
+    async def _handle_send_task(self, request_data: JSONRPCRequest, state_data: Optional[StateData] = None) -> SendTaskResponse:
         try:
             # Validate request format
             request = SendTaskRequest.model_validate(request_data.model_dump())
@@ -203,9 +206,9 @@
         try:

             if state_data:
-                raw_result = handler(request, state_data)
+                raw_result = await handler(request, state_data)
             else:
-                raw_result = handler(request)
+                raw_result = await handler(request)

             # Handle direct SendTaskResponse returns
             if isinstance(raw_result, SendTaskResponse):
@@ -513,7 +516,6 @@
                 task_id=request.params.id,
                 metadata=getattr(raw_result, "metadata", {}) or {}
             )
-            print(task)

             # Final validation and packaging
             return self._finalize_cancel_response(request, task)

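The new `on_event()` pass-through exposes FastAPI's lifecycle hooks on `SmartA2A`; this is exactly what `A2AAgent` uses to await `model_provider.load()` at startup. A sketch assuming `app` is a `SmartA2A` instance like the echo server's (handler body illustrative):

```python
# Hypothetical lifecycle hook registered through the new pass-through;
# SmartA2A simply forwards on_event() to its underlying FastAPI app.
@app.on_event("startup")
async def warm_up():
    print("SmartA2A app starting")  # e.g. connect clients, load tools here
```
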
{smarta2a-0.2.4 → smarta2a-0.3.0}/smarta2a/utils/types.py
@@ -57,7 +57,7 @@ Part = Annotated[Union[TextPart, FilePart, DataPart], Field(discriminator="type")]


 class Message(BaseModel):
-    role: Literal["user", "agent", "system"]  # Added system role for system messages
+    role: Literal["user", "agent", "system", "tool"]  # Added system role for system messages
     parts: List[Part]
     metadata: dict[str, Any] | None = None

@@ -482,3 +482,8 @@ class StateData(BaseModel):
     sessionId: str
     history: List[Message]
     metadata: Dict[str, Any]
+
+class Tool(BaseModel):
+    name: str
+    description: str
+    inputSchema: Dict[str, Any]

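The new `Tool` model is what `load_a2a_tools()` builds from the dictionaries returned by `A2AClient.list_tools()`. A minimal instantiation sketch (field values illustrative):

```python
# Sketch: constructing the new Tool model directly.
from smarta2a.utils.types import Tool

tool = Tool(
    name="send",
    description="Send a task to another agent",
    inputSchema={"type": "object", "properties": {}, "required": []},
)
print(tool.name, tool.inputSchema["type"])
```
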
smarta2a-0.2.4/examples/__init__.py (deleted)
@@ -1,3 +0,0 @@
-"""
-Example implementations using the SmartA2A framework.
-"""

smarta2a-0.2.4/examples/agents/__init__.py (deleted)
@@ -1,3 +0,0 @@
-"""
-Example agent implementations.
-"""