smarta2a 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. smarta2a/__init__.py +4 -4
  2. smarta2a/agent/a2a_agent.py +38 -0
  3. smarta2a/agent/a2a_mcp_server.py +37 -0
  4. smarta2a/archive/mcp_client.py +86 -0
  5. smarta2a/client/__init__.py +0 -0
  6. smarta2a/client/a2a_client.py +267 -0
  7. smarta2a/client/smart_mcp_client.py +60 -0
  8. smarta2a/client/tools_manager.py +58 -0
  9. smarta2a/history_update_strategies/__init__.py +8 -0
  10. smarta2a/history_update_strategies/append_strategy.py +10 -0
  11. smarta2a/history_update_strategies/history_update_strategy.py +15 -0
  12. smarta2a/model_providers/__init__.py +5 -0
  13. smarta2a/model_providers/base_llm_provider.py +15 -0
  14. smarta2a/model_providers/openai_provider.py +281 -0
  15. smarta2a/server/__init__.py +3 -0
  16. smarta2a/server/handler_registry.py +23 -0
  17. smarta2a/{server.py → server/server.py} +224 -254
  18. smarta2a/server/state_manager.py +34 -0
  19. smarta2a/server/subscription_service.py +109 -0
  20. smarta2a/server/task_service.py +155 -0
  21. smarta2a/state_stores/__init__.py +8 -0
  22. smarta2a/state_stores/base_state_store.py +20 -0
  23. smarta2a/state_stores/inmemory_state_store.py +21 -0
  24. smarta2a/utils/__init__.py +32 -0
  25. smarta2a/utils/prompt_helpers.py +38 -0
  26. smarta2a/utils/task_builder.py +153 -0
  27. smarta2a/utils/task_request_builder.py +114 -0
  28. smarta2a/{types.py → utils/types.py} +62 -2
  29. {smarta2a-0.2.1.dist-info → smarta2a-0.2.3.dist-info}/METADATA +13 -7
  30. smarta2a-0.2.3.dist-info/RECORD +32 -0
  31. smarta2a-0.2.1.dist-info/RECORD +0 -7
  32. {smarta2a-0.2.1.dist-info → smarta2a-0.2.3.dist-info}/WHEEL +0 -0
  33. {smarta2a-0.2.1.dist-info → smarta2a-0.2.3.dist-info}/licenses/LICENSE +0 -0
smarta2a/model_providers/openai_provider.py (new file)
@@ -0,0 +1,281 @@
+ # Library imports
+ import json
+ from typing import AsyncGenerator, List, Dict, Optional, Union, Any
+ from openai import AsyncOpenAI
+
+ # Local imports
+ from smarta2a.utils.types import Message, TextPart, FilePart, DataPart, Part, AgentCard
+ from smarta2a.model_providers.base_llm_provider import BaseLLMProvider
+ from smarta2a.client.tools_manager import ToolsManager
+ from smarta2a.utils.prompt_helpers import build_system_prompt
+
+ class OpenAIProvider(BaseLLMProvider):
+     def __init__(
+         self,
+         api_key: str,
+         model: str = "gpt-4o",
+         base_system_prompt: Optional[str] = None,
+         mcp_server_urls_or_paths: Optional[List[str]] = None,
+         agent_cards: Optional[List[AgentCard]] = None,
+         # enable_discovery: bool = False
+     ):
+         self.client = AsyncOpenAI(api_key=api_key)
+         self.model = model
+         self.mcp_server_urls_or_paths = mcp_server_urls_or_paths
+         self.agent_cards = agent_cards
+         # Store the base system prompt; will be enriched by tool descriptions
+         self.base_system_prompt = base_system_prompt
+         self.supported_media_types = [
+             "image/png", "image/jpeg", "image/gif", "image/webp"
+         ]
+         # Initialize ToolsManager and load MCP tools if given
+         self.tools_manager = ToolsManager()
+         if mcp_server_urls_or_paths:
+             self.tools_manager.load_mcp_tools(mcp_server_urls_or_paths)
+
+         if agent_cards:
+             self.tools_manager.load_a2a_tools(agent_cards)
+
+     def _build_system_prompt(self) -> str:
+         """Get the system prompt with tool descriptions."""
+         return build_system_prompt(
+             self.base_system_prompt,
+             self.tools_manager,
+             self.mcp_server_urls_or_paths,
+             self.agent_cards
+         )
+
+     def _convert_part(self, part: Union[TextPart, FilePart, DataPart]) -> dict:
+         """Convert a single part to OpenAI-compatible format."""
+         if isinstance(part, TextPart):
+             return {"type": "text", "text": part.text}
+
+         elif isinstance(part, FilePart):
+             if part.file.mimeType not in self.supported_media_types:
+                 raise ValueError(f"Unsupported media type: {part.file.mimeType}")
+
+             if part.file.uri:
+                 return {
+                     "type": "image_url",
+                     "image_url": {"url": part.file.uri}
+                 }
+             elif part.file.bytes:
+                 return {
+                     "type": "image_url",
+                     "image_url": {
+                         "url": f"data:{part.file.mimeType};base64,{part.file.bytes}"
+                     }
+                 }
+
+         elif isinstance(part, DataPart):
+             return {
+                 "type": "text",
+                 "text": f"[Structured Data]\n{json.dumps(part.data, indent=2)}"
+             }
+
+         raise ValueError(f"Unsupported part type: {type(part)}")
+
+
+     def _convert_messages(self, messages: List[Message]) -> List[dict]:
+         """Convert messages to OpenAI format with system prompt."""
+         openai_messages = []
+
+         # Add system prompt if provided
+         if self.base_system_prompt:
+             openai_messages.append({
+                 "role": "system",
+                 "content": self._build_system_prompt()
+             })
+
+         # Process user-provided messages
+         for msg in messages:
+             role = "assistant" if msg.role == "agent" else msg.role
+             content = []
+
+             for part in msg.parts:
+                 try:
+                     converted = self._convert_part(part)
+                     content.append(converted)
+                 except ValueError as e:
+                     if isinstance(part, FilePart):
+                         content.append({
+                             "type": "text",
+                             "text": f"<Unsupported file: {part.file.name or 'unnamed'}>"
+                         })
+                     else:
+                         raise e
+
+             openai_messages.append({
+                 "role": role,
+                 "content": content
+             })
+
+         return openai_messages
+
+
+     def _format_openai_tools(self) -> List[dict]:
+         """
+         Convert internal tools metadata to OpenAI's function-call schema.
+         """
+         openai_tools = []
+         for tool in self.tools_manager.get_tools():
+             openai_tools.append({
+                 "type": "function",
+                 "function": {
+                     "name": tool.name,
+                     "description": tool.description,
+                     "parameters": tool.input_schema
+                 }
+             })
+         return openai_tools
+
+
+     async def generate(self, messages: List[Message], **kwargs) -> str:
+         """
+         Generate a complete response, invoking tools as needed.
+         """
+         # Convert incoming messages with dynamic system prompt
+         converted_messages = self._convert_messages(messages)
+         max_iterations = 10
+
+         for _ in range(max_iterations):
+             # Call OpenAI chat completion with available tools
+             response = await self.client.chat.completions.create(
+                 model=self.model,
+                 messages=converted_messages,
+                 tools=self._format_openai_tools(),
+                 **kwargs
+             )
+             message = response.choices[0].message
+
+             # If the assistant didn't call a tool, return its content
+             if not hasattr(message, 'tool_calls') or not message.tool_calls:
+                 return message.content
+
+             # Append assistant's tool call to the conversation
+             converted_messages.append({
+                 "role": "assistant",
+                 "content": message.content,
+                 "tool_calls": [
+                     {
+                         "id": tc.id,
+                         "type": "function",
+                         "function": {
+                             "name": tc.function.name,
+                             "arguments": tc.function.arguments
+                         }
+                     }
+                     for tc in message.tool_calls
+                 ]
+             })
+
+             # Process each tool call sequentially
+             for tc in message.tool_calls:
+                 tool_name = tc.function.name
+                 # Parse arguments
+                 try:
+                     tool_args = json.loads(tc.function.arguments)
+                 except json.JSONDecodeError:
+                     tool_args = {}
+
+                 # Execute the tool via the ToolsManager
+                 try:
+                     result = await self.tools_manager.call_tool(tool_name, tool_args)
+                     result_content = result.content
+                 except Exception as e:
+                     result_content = f"Error executing {tool_name}: {e}"
+
+                 # Append the tool response into the conversation
+                 converted_messages.append({
+                     "role": "tool",
+                     "content": result_content,
+                     "tool_call_id": tc.id
+                 })
+         # If max iterations reached without a final response
+         raise RuntimeError("Max tool iteration depth reached in generate().")
+
+
+
+     async def generate_stream(
+         self, messages: List[Message], **kwargs
+     ) -> AsyncGenerator[str, None]:
+         """
+         Stream response chunks, handling tool calls when complete.
+         """
+         # Prepare messages including dynamic system prompt
+         converted_messages = self._convert_messages(messages)
+         max_iterations = 10
+
+         for _ in range(max_iterations):
+             # Start streaming completion with function-call support
+             stream = await self.client.chat.completions.create(
+                 model=self.model,
+                 messages=converted_messages,
+                 tools=self._format_openai_tools(),
+                 tool_choice="auto",
+                 stream=True,
+                 **kwargs
+             )
+
+             full_content = []
+             tool_calls: List[Dict[str, Any]] = []
+
+             # Collect streamed tokens and tool call deltas
+             async for chunk in stream:
+                 delta = chunk.choices[0].delta
+                 # Yield text content immediately
+                 if hasattr(delta, 'content') and delta.content:
+                     full_content.append(delta.content)
+                     yield delta.content
+
+                 # Accumulate tool call metadata
+                 if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                     for d in delta.tool_calls:
+                         idx = d.index
+                         # Ensure sufficient list length
+                         while len(tool_calls) <= idx:
+                             tool_calls.append({"id": "", "function": {"name": "", "arguments": ""}})
+                         # Assign fields if present
+                         if d.id:
+                             tool_calls[idx]["id"] = d.id
+                         if d.function.name:
+                             tool_calls[idx]["function"]["name"] = d.function.name
+                         if d.function.arguments:
+                             tool_calls[idx]["function"]["arguments"] += d.function.arguments
+
+             # If no tool calls were invoked, stream is complete
+             if not tool_calls:
+                 return
+
+             # Append completed assistant message with tool calls
+             converted_messages.append({
+                 "role": "assistant",
+                 "content": "".join(full_content),
+                 "tool_calls": [
+                     {
+                         "id": tc["id"],
+                         "type": "function",
+                         "function": {
+                             "name": tc["function"]["name"],
+                             "arguments": tc["function"]["arguments"]
+                         }
+                     }
+                     for tc in tool_calls
+                 ]
+             })
+
+             # Execute each tool call and append responses
+             for tc in tool_calls:
+                 name = tc["function"]["name"]
+                 try:
+                     args = json.loads(tc["function"]["arguments"])
+                 except json.JSONDecodeError:
+                     args = {}
+
+                 try:
+                     result = await self.tools_manager.call_tool(name, args)
+                     result_content = result.content
+                 except Exception as e:
+                     result_content = f"Error executing {name}: {e}"
+
+                 converted_messages.append({
+                     "role": "tool",
+                     "content": result_content,
+                     "tool_call_id": tc["id"]
+                 })
+         # If iterations exhausted without final completion
+         raise RuntimeError("Max tool iteration depth reached in generate_stream().")
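
Taken together, generate() and generate_stream() implement an agentic loop: up to ten rounds of OpenAI function calling, with each requested tool dispatched through the ToolsManager and its result appended back into the conversation before the next round. A minimal usage sketch follows; it assumes Message and TextPart take A2A-style fields (role/parts, type/text), and the API key and MCP server URL are placeholders, not values from this package:

    import asyncio
    from smarta2a.model_providers.openai_provider import OpenAIProvider
    from smarta2a.utils.types import Message, TextPart

    async def main():
        provider = OpenAIProvider(
            api_key="sk-...",  # placeholder key
            base_system_prompt="You are a helpful assistant.",
            mcp_server_urls_or_paths=["http://localhost:8000/mcp"],  # illustrative URL
        )
        # Non-streaming: returns the final assistant text after any tool rounds
        reply = await provider.generate(
            [Message(role="user", parts=[TextPart(type="text", text="What tools can you use?")])]
        )
        print(reply)

        # Streaming: text chunks are yielded as they arrive; tool calls run between rounds
        async for chunk in provider.generate_stream(
            [Message(role="user", parts=[TextPart(type="text", text="Introduce yourself.")])]
        ):
            print(chunk, end="", flush=True)

    asyncio.run(main())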
smarta2a/server/__init__.py (new file)
@@ -0,0 +1,3 @@
+ from .server import SmartA2A
+
+ __all__ = ["SmartA2A"]
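
This __init__ re-exports the server class so it is importable from the subpackage root, e.g.:

    from smarta2a.server import SmartA2A  # equivalent to importing from smarta2a.server.server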
smarta2a/server/handler_registry.py (new file)
@@ -0,0 +1,23 @@
+ # Library imports
+ from typing import Dict, Callable, Optional
+
+ # Local imports
+
+ class HandlerRegistry:
+     def __init__(self):
+         self._handlers: Dict[str, Callable] = {}
+         self._subscriptions: Dict[str, Callable] = {}
+         self._registered: set = set()
+
+     def register(self, method: str, func: Callable, subscription: bool = False):
+         if method in self._registered:
+             raise RuntimeError(f"Method '{method}' already registered")
+         target = self._subscriptions if subscription else self._handlers
+         target[method] = func
+         self._registered.add(method)
+
+     def get_handler(self, method: str) -> Optional[Callable]:
+         return self._handlers.get(method)
+
+     def get_subscription(self, method: str) -> Optional[Callable]:
+         return self._subscriptions.get(method)
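
Because register() records every name in a shared set, a method can be bound as either a plain handler or a subscription handler, but never both and never twice. A small usage sketch (the JSON-RPC method names are illustrative, in the A2A style):

    registry = HandlerRegistry()

    def on_send(request):
        return {"ok": True}

    def on_subscribe(request):
        yield {"event": "update"}

    registry.register("tasks/send", on_send)
    registry.register("tasks/sendSubscribe", on_subscribe, subscription=True)

    registry.get_handler("tasks/send")                # -> on_send
    registry.get_subscription("tasks/sendSubscribe")  # -> on_subscribe
    registry.register("tasks/send", on_send)          # raises RuntimeError: already registered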