smarta2a 0.4.28__py3-none-any.whl → 0.4.30__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- smarta2a/model_providers/openai_provider.py +97 -0
- {smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/METADATA +1 -1
- {smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/RECORD +5 -5
- {smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/WHEEL +0 -0
- {smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/licenses/LICENSE +0 -0
smarta2a/model_providers/openai_provider.py
@@ -228,6 +228,103 @@ class OpenAIProvider(BaseLLMProvider):
         raise RuntimeError("Max tool iteration depth reached in generate().")


+    async def generate_stream(self, state: StateData, **kwargs) -> AsyncGenerator[str, None]:
+        """
+        Stream response chunks, invoking tools as needed.
+        """
+        context_history = state.context_history
+        # Normalize incoming messages to your Message model
+        msgs = [
+            msg if isinstance(msg, Message) else Message(**msg)
+            for msg in context_history
+        ]
+        # Convert to OpenAI schema, including any prior tool results
+        converted_messages = self._convert_messages(msgs)
+        max_iterations = 30
+
+        for _ in range(max_iterations):
+            # Kick off the streaming completion
+            stream = await self.client.chat.completions.create(
+                model=self.model,
+                messages=converted_messages,
+                tools=self._format_openai_tools(),
+                tool_choice="auto",
+                stream=True,
+                **kwargs
+            )
+
+            full_content = ""
+            tool_calls: List[Dict[str, Any]] = []
+
+            # As chunks arrive, yield them and collect any tool_call deltas
+            async for chunk in stream:
+                delta = chunk.choices[0].delta
+
+                # 1) Stream content immediately
+                if hasattr(delta, "content") and delta.content:
+                    yield delta.content
+                    full_content += delta.content
+
+                # 2) Buffer up any function/tool calls for after the stream
+                if hasattr(delta, "tool_calls") and delta.tool_calls:
+                    for d in delta.tool_calls:
+                        idx = d.index
+                        # Ensure list is long enough
+                        while len(tool_calls) <= idx:
+                            tool_calls.append({
+                                "id": "",
+                                "function": {"name": "", "arguments": ""}
+                            })
+                        if d.id:
+                            tool_calls[idx]["id"] = d.id
+                        if d.function.name:
+                            tool_calls[idx]["function"]["name"] = d.function.name
+                        if d.function.arguments:
+                            tool_calls[idx]["function"]["arguments"] += d.function.arguments
+
+            # If the assistant didn't invoke any tools, we're done
+            if not tool_calls:
+                return
+
+            # Otherwise, append the assistant's outgoing call and loop for tool execution
+            converted_messages.append({
+                "role": "assistant",
+                "content": full_content,
+                "tool_calls": [
+                    {
+                        "id": tc["id"],
+                        "type": "function",
+                        "function": {
+                            "name": tc["function"]["name"],
+                            "arguments": tc["function"]["arguments"]
+                        }
+                    }
+                    for tc in tool_calls
+                ]
+            })
+
+            # Execute each tool in turn and append its result
+            for tc in tool_calls:
+                name = tc["function"]["name"]
+                try:
+                    args = json.loads(tc["function"]["arguments"] or "{}")
+                except json.JSONDecodeError:
+                    args = {}
+                try:
+                    tool_res = await self.tools_manager.call_tool(name, args)
+                    result_content = getattr(tool_res, "content", None) or (
+                        tool_res.get("content") if isinstance(tool_res, dict) else str(tool_res)
+                    )
+                except Exception as e:
+                    result_content = f"Error executing {name}: {e}"
+
+                converted_messages.append({
+                    "role": "tool",
+                    "content": result_content,
+                    "tool_call_id": tc["id"]
+                })
+
+        raise RuntimeError("Max tool iteration depth reached in generate_stream().")



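For orientation, a minimal sketch of how a caller might consume the new generate_stream() generator. The OpenAIProvider constructor arguments, the StateData and Message fields, and the import paths shown below are assumptions inferred from names visible in this diff, not the package's documented API.

# Hypothetical consumer of the new generate_stream() method.
# Assumptions (not confirmed by this diff): the OpenAIProvider constructor
# signature, the StateData/Message fields, and the import paths below.
import asyncio

from smarta2a.model_providers.openai_provider import OpenAIProvider
from smarta2a.utils.types import Message, StateData  # assumed locations

async def main() -> None:
    provider = OpenAIProvider(api_key="sk-...", model="gpt-4o-mini")  # assumed signature
    state = StateData(
        context_history=[Message(role="user", content="Summarize today's weather in Paris.")]
    )  # assumed fields

    # Text chunks are yielded as they arrive; any tool calls are buffered,
    # executed, and fed back into the conversation before streaming resumes.
    async for chunk in provider.generate_stream(state):
        print(chunk, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(main())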
{smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: smarta2a
-Version: 0.4.28
+Version: 0.4.30
 Summary: a Python framework that helps you build servers and AI agents that communicate using the A2A protocol
 Project-URL: Homepage, https://github.com/siddharthsma/smarta2a
 Project-URL: Bug Tracker, https://github.com/siddharthsma/smarta2a/issues
{smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/RECORD
@@ -17,7 +17,7 @@ smarta2a/history_update_strategies/history_update_strategy.py,sha256=n2sfIGu8ztK
 smarta2a/history_update_strategies/rolling_window_strategy.py,sha256=7Ch042JWt4TM_r1-sFKlSIxHj8VX1P3ZoqjCvIdeSqA,540
 smarta2a/model_providers/__init__.py,sha256=hJj0w00JjqTiBgJmHmOWwL6MU_hwmro9xTiX3XYf6ts,148
 smarta2a/model_providers/base_llm_provider.py,sha256=iQUqjnypl0f2M929iU0WhHoxAE4ek-NUFJPbEnNQ8-4,412
-smarta2a/model_providers/openai_provider.py,sha256=
+smarta2a/model_providers/openai_provider.py,sha256=0KC11HrTwhOF44oJhDZOOaMUavtKBGamFqXOU4ODIK4,12220
 smarta2a/server/__init__.py,sha256=f2X454Ll4vJc02V4JLJHTN-h8u0TBm4d_FkiO4t686U,53
 smarta2a/server/handler_registry.py,sha256=OVRG5dTvxB7qUNXgsqWxVNxIyRljUShSYxb1gtbi5XM,820
 smarta2a/server/json_rpc_request_processor.py,sha256=qRB3sfj_n9ImkIOCdaUKMsDmKcO7CiMhaZ4VdQS7Mb4,6993
@@ -35,7 +35,7 @@ smarta2a/utils/task_builder.py,sha256=wqSyfVHNTaXuGESu09dhlaDi7D007gcN3-8tH-nPQ4
 smarta2a/utils/task_request_builder.py,sha256=6cOGOqj2Rg43xWM03GRJQzlIZHBptsMCJRp7oD-TDAQ,3362
 smarta2a/utils/tools_manager.py,sha256=oR5cbwzPZ36hQAsWAgb-c6wFv5BthmCPraD7DSv-Bv8,4332
 smarta2a/utils/types.py,sha256=kzA6Vv5xXfu1sJuxhEXrglI9e9S6eZVIljMnsrQVyN0,13650
-smarta2a-0.4.
-smarta2a-0.4.
-smarta2a-0.4.
-smarta2a-0.4.
+smarta2a-0.4.30.dist-info/METADATA,sha256=Z9gATeXMfWK1-XkD8h8J3evYcfziIcHAF7MRD8JgQwQ,12783
+smarta2a-0.4.30.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+smarta2a-0.4.30.dist-info/licenses/LICENSE,sha256=lDbqrxVnzDMY5KJ8JS1WhvkWE8TJaw-O-CHDy-ecsJA,2095
+smarta2a-0.4.30.dist-info/RECORD,,
{smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/WHEEL: File without changes
{smarta2a-0.4.28.dist-info → smarta2a-0.4.30.dist-info}/licenses/LICENSE: File without changes