agentex-sdk 0.4.12__py3-none-any.whl → 0.4.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentex/_constants.py +3 -3
- agentex/_version.py +1 -1
- agentex/lib/adk/_modules/acp.py +28 -2
- agentex/lib/adk/providers/_modules/openai.py +19 -13
- agentex/lib/cli/handlers/deploy_handlers.py +1 -1
- agentex/lib/core/services/adk/acp/acp.py +62 -12
- agentex/lib/core/services/adk/providers/openai.py +152 -212
- agentex/lib/core/temporal/activities/adk/acp/acp_activities.py +14 -0
- agentex/lib/core/temporal/activities/adk/providers/openai_activities.py +4 -0
- agentex/lib/core/temporal/workers/worker.py +23 -2
- agentex/lib/sdk/fastacp/base/base_acp_server.py +22 -2
- agentex/lib/sdk/fastacp/base/constants.py +24 -0
- agentex/lib/types/acp.py +20 -0
- agentex/resources/agents.py +3 -0
- agentex/resources/tasks.py +4 -4
- agentex/types/agent.py +7 -1
- agentex/types/task.py +2 -0
- {agentex_sdk-0.4.12.dist-info → agentex_sdk-0.4.14.dist-info}/METADATA +1 -1
- {agentex_sdk-0.4.12.dist-info → agentex_sdk-0.4.14.dist-info}/RECORD +22 -21
- {agentex_sdk-0.4.12.dist-info → agentex_sdk-0.4.14.dist-info}/WHEEL +0 -0
- {agentex_sdk-0.4.12.dist-info → agentex_sdk-0.4.14.dist-info}/entry_points.txt +0 -0
- {agentex_sdk-0.4.12.dist-info → agentex_sdk-0.4.14.dist-info}/licenses/LICENSE +0 -0
agentex/lib/core/services/adk/providers/openai.py

@@ -85,9 +85,7 @@ class OpenAIService:
         self.streaming_service = streaming_service
         self.tracer = tracer

-    def _extract_tool_call_info(
-        self, tool_call_item: Any
-    ) -> tuple[str, str, dict[str, Any]]:
+    def _extract_tool_call_info(self, tool_call_item: Any) -> tuple[str, str, dict[str, Any]]:
         """
         Extract call_id, tool_name, and tool_arguments from a tool call item.

@@ -99,9 +97,9 @@ class OpenAIService:
         """
         # Generic handling for different tool call types
         # Try 'call_id' first, then 'id', then generate placeholder
-        if hasattr(tool_call_item, 'call_id'):
+        if hasattr(tool_call_item, "call_id"):
            call_id = tool_call_item.call_id
-        elif hasattr(tool_call_item, 'id'):
+        elif hasattr(tool_call_item, "id"):
            call_id = tool_call_item.id
        else:
            call_id = f"unknown_call_{id(tool_call_item)}"

@@ -113,26 +111,18 @@ class OpenAIService:

         if isinstance(tool_call_item, ResponseFunctionWebSearch):
             tool_name = "web_search"
-            tool_arguments = {
-                "action": tool_call_item.action.model_dump(),
-                "status": tool_call_item.status
-            }
+            tool_arguments = {"action": tool_call_item.action.model_dump(), "status": tool_call_item.status}
         elif isinstance(tool_call_item, ResponseCodeInterpreterToolCall):
             tool_name = "code_interpreter"
-            tool_arguments = {
-                "code": tool_call_item.code,
-                "status": tool_call_item.status
-            }
+            tool_arguments = {"code": tool_call_item.code, "status": tool_call_item.status}
         else:
             # Generic handling for any tool call type
-            tool_name = getattr(tool_call_item, 'name', type(tool_call_item).__name__)
+            tool_name = getattr(tool_call_item, "name", type(tool_call_item).__name__)
             tool_arguments = tool_call_item.model_dump()
-
+
         return call_id, tool_name, tool_arguments

-    def _extract_tool_response_info(
-        self, tool_call_map: dict[str, Any], tool_output_item: Any
-    ) -> tuple[str, str, str]:
+    def _extract_tool_response_info(self, tool_call_map: dict[str, Any], tool_output_item: Any) -> tuple[str, str, str]:
         """
         Extract call_id, tool_name, and content from a tool output item.

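Read together, these hunks reduce `_extract_tool_call_info` to a short fallback chain: prefer `call_id`, then `id`, then a synthetic key; special-case web search and code interpreter calls; otherwise fall back to the class name and `model_dump()`. A minimal standalone sketch of that logic (the `hasattr("model_dump")` guard is added here for self-containment; the package assumes Pydantic items):

from typing import Any

def extract_tool_call_info(tool_call_item: Any) -> tuple[str, str, dict[str, Any]]:
    # Prefer 'call_id', then 'id', then a placeholder keyed on object identity.
    if hasattr(tool_call_item, "call_id"):
        call_id = tool_call_item.call_id
    elif hasattr(tool_call_item, "id"):
        call_id = tool_call_item.id
    else:
        call_id = f"unknown_call_{id(tool_call_item)}"
    # Fall back to the class name when the item carries no 'name' attribute.
    tool_name = getattr(tool_call_item, "name", type(tool_call_item).__name__)
    tool_arguments = tool_call_item.model_dump() if hasattr(tool_call_item, "model_dump") else {}
    return call_id, tool_name, tool_arguments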
@@ -145,14 +135,14 @@ class OpenAIService:
         """
         # Extract call_id and content from the tool_output_item
         # Handle both dictionary access and attribute access
-        if hasattr(tool_output_item, 'get') and callable(tool_output_item.get):
+        if hasattr(tool_output_item, "get") and callable(tool_output_item.get):
             # Dictionary-like access
             call_id = tool_output_item["call_id"]
             content = tool_output_item["output"]
         else:
             # Attribute access for structured objects
-            call_id = getattr(tool_output_item, 'call_id', None)
-            content = getattr(tool_output_item, 'output', None)
+            call_id = getattr(tool_output_item, "call_id", None)
+            content = getattr(tool_output_item, "output", None)

         # Get the name from the tool call map using generic approach
         tool_call = tool_call_map[call_id]
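The branch above is a duck-typing split: anything exposing a callable `.get` is treated as a mapping and indexed, everything else is read via `getattr` with `None` defaults. The same dispatch in isolation (a sketch, not the shipped helper):

from typing import Any

def read_tool_output(tool_output_item: Any) -> tuple[Any, Any]:
    # Mapping-like objects expose a callable .get; otherwise use attribute access.
    if hasattr(tool_output_item, "get") and callable(tool_output_item.get):
        return tool_output_item["call_id"], tool_output_item["output"]
    return getattr(tool_output_item, "call_id", None), getattr(tool_output_item, "output", None)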
@@ -180,14 +170,13 @@ class OpenAIService:
         tools: list[BaseModel] | None = None,
         output_type: type[Any] | None = None,
         tool_use_behavior: (
-            Literal["run_llm_again", "stop_on_first_tool"]
-            | StopAtTools
-            | ToolsToFinalOutputFunction
+            Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
         max_turns: int | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResult:
         """
         Run an agent without streaming or TaskMessage creation.

@@ -206,11 +195,11 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold 
+            mcp_timeout_seconds: Optional param to set the timeout threshold
                 for the MCP servers. Defaults to 5 seconds.
-            input_guardrails: Optional list of input guardrails to run on 
+            input_guardrails: Optional list of input guardrails to run on
                 initial user input.
-            output_guardrails: Optional list of output guardrails to run on 
+            output_guardrails: Optional list of output guardrails to run on
                 final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
             max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
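The new `previous_response_id` parameter is threaded through to `Runner.run` (see the dispatch hunks below); it lets a caller continue from an earlier OpenAI Responses API response instead of replaying the full input history. A hypothetical call site (the `service` instance and the stored `resp_id` are assumptions, and other required arguments are elided):

# resp_id was saved from an earlier run; None would mean "start fresh".
result = await service.run_agent(
    agent_name="support-agent",
    agent_instructions="Answer briefly.",
    input_list=[{"role": "user", "content": "And the second option?"}],
    previous_response_id=resp_id,
)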
@@ -240,17 +229,9 @@ class OpenAIService:
         ) as span:
             heartbeat_if_in_workflow("run agent")

-            async with mcp_server_context(
-                mcp_server_params, mcp_timeout_seconds
-            ) as servers:
-                tools = [
-                    tool.to_oai_function_tool() for tool in tools
-                ] if tools else []
-                handoffs = (
-                    [Agent(**handoff.model_dump()) for handoff in handoffs]
-                    if handoffs
-                    else []
-                )
+            async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
+                tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
+                handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []

                 agent_kwargs = {
                     "name": agent_name,

@@ -264,9 +245,7 @@ class OpenAIService:
                     "tool_use_behavior": tool_use_behavior,
                 }
                 if model_settings is not None:
-                    agent_kwargs["model_settings"] = (
-                        model_settings.to_oai_model_settings()
-                    )
+                    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
                 if input_guardrails is not None:
                     agent_kwargs["input_guardrails"] = input_guardrails
                 if output_guardrails is not None:
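Note the `agent_kwargs` idiom running through these hunks: optional settings are added to the dict only when the caller supplied them, so `Agent`'s own defaults stay in force, and the object is built once with `Agent(**agent_kwargs)`. Condensed (not the full keyword set the package passes):

agent_kwargs: dict[str, Any] = {"name": agent_name, "instructions": agent_instructions}
if model_settings is not None:
    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
if input_guardrails is not None:
    agent_kwargs["input_guardrails"] = input_guardrails
agent = Agent(**agent_kwargs)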
@@ -275,17 +254,26 @@ class OpenAIService:
                 agent = Agent(**agent_kwargs)

                 # Run without streaming
-                if max_turns is not None:
+                if max_turns is not None and previous_response_id is not None:
+                    result = await Runner.run(
+                        starting_agent=agent,
+                        input=input_list,
+                        max_turns=max_turns,
+                        previous_response_id=previous_response_id,
+                    )
+                elif max_turns is not None:
                     result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                elif previous_response_id is not None:
+                    result = await Runner.run(
+                        starting_agent=agent, input=input_list, previous_response_id=previous_response_id
+                    )
                 else:
                     result = await Runner.run(starting_agent=agent, input=input_list)

                 if span:
                     span.output = {
                         "new_items": [
-                            item.raw_item.model_dump()
-                            if isinstance(item.raw_item, BaseModel)
-                            else item.raw_item
+                            item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
                         "final_output": result.final_output,
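The four-way `if/elif` exists so that `max_turns` and `previous_response_id` are only passed to `Runner.run` when the caller actually set them, leaving the Runner's defaults untouched otherwise. An equivalent, more compact formulation (a sketch of the idea, not what the package ships):

run_kwargs: dict[str, Any] = {"starting_agent": agent, "input": input_list}
if max_turns is not None:
    run_kwargs["max_turns"] = max_turns
if previous_response_id is not None:
    run_kwargs["previous_response_id"] = previous_response_id
result = await Runner.run(**run_kwargs)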
@@ -309,14 +297,13 @@ class OpenAIService:
         tools: list[BaseModel] | None = None,
         output_type: type[Any] | None = None,
         tool_use_behavior: (
-            Literal["run_llm_again", "stop_on_first_tool"]
-            | StopAtTools
-            | ToolsToFinalOutputFunction
+            Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
         max_turns: int | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResult:
         """
         Run an agent with automatic TaskMessage creation.

@@ -372,15 +359,9 @@ class OpenAIService:
         ) as span:
             heartbeat_if_in_workflow("run agent auto send")

-            async with mcp_server_context(
-                mcp_server_params, mcp_timeout_seconds
-            ) as servers:
+            async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                 tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
-                handoffs = (
-                    [Agent(**handoff.model_dump()) for handoff in handoffs]
-                    if handoffs
-                    else []
-                )
+                handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
                 agent_kwargs = {
                     "name": agent_name,
                     "instructions": agent_instructions,
@@ -393,9 +374,7 @@ class OpenAIService:
                     "tool_use_behavior": tool_use_behavior,
                 }
                 if model_settings is not None:
-                    agent_kwargs["model_settings"] = (
-                        model_settings.to_oai_model_settings()
-                    )
+                    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
                 if input_guardrails is not None:
                     agent_kwargs["input_guardrails"] = input_guardrails
                 if output_guardrails is not None:

@@ -404,17 +383,26 @@ class OpenAIService:
                 agent = Agent(**agent_kwargs)

                 # Run without streaming
-                if max_turns is not None:
+                if max_turns is not None and previous_response_id is not None:
+                    result = await Runner.run(
+                        starting_agent=agent,
+                        input=input_list,
+                        max_turns=max_turns,
+                        previous_response_id=previous_response_id,
+                    )
+                elif max_turns is not None:
                     result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                elif previous_response_id is not None:
+                    result = await Runner.run(
+                        starting_agent=agent, input=input_list, previous_response_id=previous_response_id
+                    )
                 else:
                     result = await Runner.run(starting_agent=agent, input=input_list)

                 if span:
                     span.output = {
                         "new_items": [
-                            item.raw_item.model_dump()
-                            if isinstance(item.raw_item, BaseModel)
-                            else item.raw_item
+                            item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
                         "final_output": result.final_output,
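Chaining turns with `previous_response_id` follows the usual Responses API pattern: run once, keep the response id off the result, and hand it to the next call so the model continues server-side instead of re-reading history. A hypothetical two-turn driver (`last_response_id` as the accessor on the run result is an assumption):

first = await service.run_agent_auto_send(
    task_id=task_id,
    agent_name="support-agent",
    agent_instructions="Answer briefly.",
    input_list=[{"role": "user", "content": "List three options."}],
)
second = await service.run_agent_auto_send(
    task_id=task_id,
    agent_name="support-agent",
    agent_instructions="Answer briefly.",
    input_list=[{"role": "user", "content": "Tell me more about the second."}],
    previous_response_id=first.last_response_id,  # assumed accessor
)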
@@ -429,12 +417,10 @@ class OpenAIService:
                             content=item.raw_item.content[0].text,
                         )
                         # Create message for the final result using streaming context
-                        async with (
-                            self.streaming_service.streaming_task_message_context(
-                                task_id=task_id,
-                                initial_content=text_content,
-                            ) as streaming_context
-                        ):
+                        async with self.streaming_service.streaming_task_message_context(
+                            task_id=task_id,
+                            initial_content=text_content,
+                        ) as streaming_context:
                             await streaming_context.stream_update(
                                 update=StreamTaskMessageFull(
                                     parent_task_message=streaming_context.task_message,

@@ -445,7 +431,7 @@ class OpenAIService:

                     elif item.type == "tool_call_item":
                         tool_call_item = item.raw_item
-
+
                         # Extract tool call information using the helper method
                         call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                         tool_call_map[call_id] = tool_call_item

@@ -458,12 +444,10 @@ class OpenAIService:
                         )

                         # Create tool request using streaming context
-                        async with (
-                            self.streaming_service.streaming_task_message_context(
-                                task_id=task_id,
-                                initial_content=tool_request_content,
-                            ) as streaming_context
-                        ):
+                        async with self.streaming_service.streaming_task_message_context(
+                            task_id=task_id,
+                            initial_content=tool_request_content,
+                        ) as streaming_context:
                             await streaming_context.stream_update(
                                 update=StreamTaskMessageFull(
                                     parent_task_message=streaming_context.task_message,

@@ -476,9 +460,7 @@ class OpenAIService:
                         tool_output_item = item.raw_item

                         # Extract tool response information using the helper method
-                        call_id, tool_name, content = self._extract_tool_response_info(
-                            tool_call_map, tool_output_item
-                        )
+                        call_id, tool_name, content = self._extract_tool_response_info(tool_call_map, tool_output_item)

                         tool_response_content = ToolResponseContent(
                             author="agent",

@@ -487,12 +469,9 @@ class OpenAIService:
                             content=content,
                         )
                         # Create tool response using streaming context
-                        async with (
-                            self.streaming_service.streaming_task_message_context(
-                                task_id=task_id,
-                                initial_content=tool_response_content
-                            ) as streaming_context
-                        ):
+                        async with self.streaming_service.streaming_task_message_context(
+                            task_id=task_id, initial_content=tool_response_content
+                        ) as streaming_context:
                             await streaming_context.stream_update(
                                 update=StreamTaskMessageFull(
                                     parent_task_message=streaming_context.task_message,
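Every hunk in this block is the same mechanical rewrite: the parenthesized `async with (...)` wrapper is dropped and `streaming_task_message_context(...)` is used directly as the context manager. The pattern, reduced to its skeleton (the content payload varies per branch):

async with self.streaming_service.streaming_task_message_context(
    task_id=task_id,
    initial_content=content,  # TextContent / ToolRequestContent / ToolResponseContent
) as streaming_context:
    await streaming_context.stream_update(
        update=StreamTaskMessageFull(
            parent_task_message=streaming_context.task_message,
            content=content,
            type="full",
        ),
    )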
@@ -519,14 +498,13 @@ class OpenAIService:
         tools: list[BaseModel] | None = None,
         output_type: type[Any] | None = None,
         tool_use_behavior: (
-            Literal["run_llm_again", "stop_on_first_tool"]
-            | StopAtTools
-            | ToolsToFinalOutputFunction
+            Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
         max_turns: int | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled but no TaskMessage creation.

@@ -545,11 +523,11 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold 
+            mcp_timeout_seconds: Optional param to set the timeout threshold
                 for the MCP servers. Defaults to 5 seconds.
-            input_guardrails: Optional list of input guardrails to run on 
+            input_guardrails: Optional list of input guardrails to run on
                 initial user input.
-            output_guardrails: Optional list of output guardrails to run on 
+            output_guardrails: Optional list of output guardrails to run on
                 final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
             max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.

@@ -579,15 +557,9 @@ class OpenAIService:
         ) as span:
             heartbeat_if_in_workflow("run agent streamed")

-            async with mcp_server_context(
-                mcp_server_params, mcp_timeout_seconds
-            ) as servers:
+            async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                 tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
-                handoffs = (
-                    [Agent(**handoff.model_dump()) for handoff in handoffs]
-                    if handoffs
-                    else []
-                )
+                handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
                 agent_kwargs = {
                     "name": agent_name,
                     "instructions": agent_instructions,

@@ -600,9 +572,7 @@ class OpenAIService:
                     "tool_use_behavior": tool_use_behavior,
                 }
                 if model_settings is not None:
-                    agent_kwargs["model_settings"] = (
-                        model_settings.to_oai_model_settings()
-                    )
+                    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
                 if input_guardrails is not None:
                     agent_kwargs["input_guardrails"] = input_guardrails
                 if output_guardrails is not None:
@@ -611,17 +581,26 @@ class OpenAIService:
                 agent = Agent(**agent_kwargs)

                 # Run with streaming (but no TaskMessage creation)
-                if max_turns is not None:
+                if max_turns is not None and previous_response_id is not None:
+                    result = Runner.run_streamed(
+                        starting_agent=agent,
+                        input=input_list,
+                        max_turns=max_turns,
+                        previous_response_id=previous_response_id,
+                    )
+                elif max_turns is not None:
                     result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns)
+                elif previous_response_id is not None:
+                    result = Runner.run_streamed(
+                        starting_agent=agent, input=input_list, previous_response_id=previous_response_id
+                    )
                 else:
                     result = Runner.run_streamed(starting_agent=agent, input=input_list)

                 if span:
                     span.output = {
                         "new_items": [
-                            item.raw_item.model_dump()
-                            if isinstance(item.raw_item, BaseModel)
-                            else item.raw_item
+                            item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
                         "final_output": result.final_output,
@@ -645,14 +624,13 @@ class OpenAIService:
         tools: list[BaseModel] | None = None,
         output_type: type[Any] | None = None,
         tool_use_behavior: (
-            Literal["run_llm_again", "stop_on_first_tool"]
-            | StopAtTools
-            | ToolsToFinalOutputFunction
+            Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
         max_turns: int | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled and automatic TaskMessage creation.

@@ -672,11 +650,11 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold 
+            mcp_timeout_seconds: Optional param to set the timeout threshold
                 for the MCP servers. Defaults to 5 seconds.
-            input_guardrails: Optional list of input guardrails to run on 
+            input_guardrails: Optional list of input guardrails to run on
                 initial user input.
-            output_guardrails: Optional list of output guardrails to run on 
+            output_guardrails: Optional list of output guardrails to run on
                 final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
             max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.

@@ -715,15 +693,9 @@ class OpenAIService:
         ) as span:
             heartbeat_if_in_workflow("run agent streamed auto send")

-            async with mcp_server_context(
-                mcp_server_params, mcp_timeout_seconds
-            ) as servers:
+            async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                 tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
-                handoffs = (
-                    [Agent(**handoff.model_dump()) for handoff in handoffs]
-                    if handoffs
-                    else []
-                )
+                handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
                 agent_kwargs = {
                     "name": agent_name,
                     "instructions": agent_instructions,

@@ -736,9 +708,7 @@ class OpenAIService:
                     "tool_use_behavior": tool_use_behavior,
                 }
                 if model_settings is not None:
-                    agent_kwargs["model_settings"] = (
-                        model_settings.to_oai_model_settings()
-                    )
+                    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
                 if input_guardrails is not None:
                     agent_kwargs["input_guardrails"] = input_guardrails
                 if output_guardrails is not None:
@@ -752,22 +722,18 @@ class OpenAIService:
                 else:
                     result = Runner.run_streamed(starting_agent=agent, input=input_list)

-                item_id_to_streaming_context: dict[
-                    str, StreamingTaskMessageContext
-                ] = {}
+                item_id_to_streaming_context: dict[str, StreamingTaskMessageContext] = {}
                 unclosed_item_ids: set[str] = set()

                 try:
                     # Process streaming events with TaskMessage creation
                     async for event in result.stream_events():
-                        heartbeat_if_in_workflow(
-                            "processing stream event with auto send"
-                        )
+                        heartbeat_if_in_workflow("processing stream event with auto send")

                         if event.type == "run_item_stream_event":
                             if event.item.type == "tool_call_item":
                                 tool_call_item = event.item.raw_item
-
+
                                 # Extract tool call information using the helper method
                                 call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                                 tool_call_map[call_id] = tool_call_item
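The loop fans out on two event families: `run_item_stream_event` for completed run items and raw response events for token-level deltas (the latter branch name is inferred from the `event.data` classes handled below, not shown verbatim in these hunks). Skeleton of the dispatch, handlers elided:

async for event in result.stream_events():
    heartbeat_if_in_workflow("processing stream event with auto send")
    if event.type == "run_item_stream_event":
        if event.item.type == "tool_call_item":
            ...  # persist a ToolRequestContent message
        else:
            ...  # tool outputs become ToolResponseContent messages
    else:
        ...  # raw response deltas are routed into per-item streaming contexts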
@@ -780,12 +746,10 @@ class OpenAIService:
                                 )

                                 # Create tool request using streaming context (immediate completion)
-                                async with (
-                                    self.streaming_service.streaming_task_message_context(
-                                        task_id=task_id,
-                                        initial_content=tool_request_content,
-                                    ) as streaming_context
-                                ):
+                                async with self.streaming_service.streaming_task_message_context(
+                                    task_id=task_id,
+                                    initial_content=tool_request_content,
+                                ) as streaming_context:
                                     # The message has already been persisted, but we still need to send an update
                                     await streaming_context.stream_update(
                                         update=StreamTaskMessageFull(

@@ -811,12 +775,9 @@ class OpenAIService:
                                 )

                                 # Create tool response using streaming context (immediate completion)
-                                async with (
-                                    self.streaming_service.streaming_task_message_context(
-                                        task_id=task_id,
-                                        initial_content=tool_response_content
-                                    ) as streaming_context
-                                ):
+                                async with self.streaming_service.streaming_task_message_context(
+                                    task_id=task_id, initial_content=tool_response_content
+                                ) as streaming_context:
                                     # The message has already been persisted, but we still need to send an update
                                     await streaming_context.stream_update(
                                         update=StreamTaskMessageFull(
@@ -842,14 +803,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]

                             # Stream the delta through the streaming service
                             await streaming_context.stream_update(

@@ -879,14 +836,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]

                             # Stream the summary delta through the streaming service
                             await streaming_context.stream_update(

@@ -920,14 +873,10 @@ class OpenAIService:
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[
-                                    item_id
-                                ] = await streaming_context.open()
+                                item_id_to_streaming_context[item_id] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]

                             # Stream the content delta through the streaming service
                             await streaming_context.stream_update(
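Deltas for one output item arrive repeatedly, so the code lazily opens one streaming context per `item_id`, remembers unclosed ids in a set, and closes the context when the item completes. The bookkeeping, stripped to its core:

item_id_to_streaming_context: dict[str, StreamingTaskMessageContext] = {}
unclosed_item_ids: set[str] = set()

# First delta for an item: open and register its context.
if item_id not in item_id_to_streaming_context:
    item_id_to_streaming_context[item_id] = await streaming_context.open()
    unclosed_item_ids.add(item_id)
else:
    streaming_context = item_id_to_streaming_context[item_id]

# On ResponseOutputItemDoneEvent: close the context and forget the id.
if item_id in item_id_to_streaming_context:
    await item_id_to_streaming_context[item_id].close()
    unclosed_item_ids.discard(item_id)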
@@ -955,7 +904,7 @@ class OpenAIService:
                         # to close the streaming context, but they do!!!
                         # They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent
                         # I have no idea why they do this.
-
+
                         elif isinstance(event.data, ResponseReasoningTextDoneEvent):
                             # Handle reasoning content text completion
                             item_id = event.data.item_id

@@ -965,16 +914,13 @@ class OpenAIService:
                             # reasoning content texts. The context will be closed when the entire
                             # output item is done (ResponseOutputItemDoneEvent)

-
                         elif isinstance(event.data, ResponseOutputItemDoneEvent):
                             # Handle item completion
                             item_id = event.data.item.id

                             # Finish the streaming context (sends DONE event and updates message)
                             if item_id in item_id_to_streaming_context:
-                                streaming_context = item_id_to_streaming_context[
-                                    item_id
-                                ]
+                                streaming_context = item_id_to_streaming_context[item_id]
                                 await streaming_context.close()
                                 if item_id in unclosed_item_ids:
                                     unclosed_item_ids.remove(item_id)
@@ -984,39 +930,36 @@ class OpenAIService:
                     # Create a copy to avoid modifying set during iteration
                     remaining_items = list(unclosed_item_ids)
                     for item_id in remaining_items:
-                        if (
-
-
-
-                        ]
+                        if (
+                            item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
+                        ):  # Check if still unclosed
+                            streaming_context = item_id_to_streaming_context[item_id]
                             await streaming_context.close()
                             unclosed_item_ids.discard(item_id)

                 except InputGuardrailTripwireTriggered as e:
                     # Handle guardrail trigger by sending a rejection message
                     rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
-
+
                     # Try to extract rejection message from the guardrail result
-                    if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
-                        output_info = getattr(e.guardrail_result.output, 'output_info', {})
-                        if isinstance(output_info, dict) and 'rejection_message' in output_info:
-                            rejection_message = output_info['rejection_message']
-                        elif hasattr(e.guardrail_result, 'guardrail'):
+                    if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
+                        output_info = getattr(e.guardrail_result.output, "output_info", {})
+                        if isinstance(output_info, dict) and "rejection_message" in output_info:
+                            rejection_message = output_info["rejection_message"]
+                        elif hasattr(e.guardrail_result, "guardrail"):
                             # Fall back to using guardrail name if no custom message
-                            triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                            triggered_guardrail_name = getattr(e.guardrail_result.guardrail, "name", None)
                             if triggered_guardrail_name:
                                 rejection_message = f"I'm sorry, but I cannot process this request. The '{triggered_guardrail_name}' guardrail was triggered."
-
+
                     # Create and send the rejection message as a TaskMessage
-                    async with (
-                        self.streaming_service.streaming_task_message_context(
-                            task_id=task_id,
-                            initial_content=TextContent(
-                                author="agent",
-                                content=rejection_message,
-                            ),
-                        ) as streaming_context
-                    ):
+                    async with self.streaming_service.streaming_task_message_context(
+                        task_id=task_id,
+                        initial_content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                    ) as streaming_context:
                         # Send the full message
                         await streaming_context.stream_update(
                             update=StreamTaskMessageFull(
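Both guardrail handlers walk the exception identically: prefer a custom `rejection_message` published in the guardrail's `output_info`, else fall back to naming the triggered guardrail. Factored into a helper for illustration (the package inlines this logic; the helper name is hypothetical):

def rejection_from_guardrail(e: Exception, default: str) -> str:
    # Prefer a message the guardrail itself attached to its output_info.
    if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
        output_info = getattr(e.guardrail_result.output, "output_info", {})
        if isinstance(output_info, dict) and "rejection_message" in output_info:
            return output_info["rejection_message"]
        if hasattr(e.guardrail_result, "guardrail"):
            name = getattr(e.guardrail_result.guardrail, "name", None)
            if name:
                return f"The '{name}' guardrail was triggered."
    return default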
@@ -1028,35 +971,33 @@ class OpenAIService:
                                 type="full",
                             ),
                         )
-
+
                     # Re-raise to let the activity handle it
                     raise
-
+
                 except OutputGuardrailTripwireTriggered as e:
                     # Handle output guardrail trigger by sending a rejection message
                     rejection_message = "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question."
-
+
                     # Try to extract rejection message from the guardrail result
-                    if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
-                        output_info = getattr(e.guardrail_result.output, 'output_info', {})
-                        if isinstance(output_info, dict) and 'rejection_message' in output_info:
-                            rejection_message = output_info['rejection_message']
-                        elif hasattr(e.guardrail_result, 'guardrail'):
+                    if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
+                        output_info = getattr(e.guardrail_result.output, "output_info", {})
+                        if isinstance(output_info, dict) and "rejection_message" in output_info:
+                            rejection_message = output_info["rejection_message"]
+                        elif hasattr(e.guardrail_result, "guardrail"):
                             # Fall back to using guardrail name if no custom message
-                            triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                            triggered_guardrail_name = getattr(e.guardrail_result.guardrail, "name", None)
                             if triggered_guardrail_name:
                                 rejection_message = f"I'm sorry, but I cannot provide this response. The '{triggered_guardrail_name}' guardrail was triggered."
-
+
                     # Create and send the rejection message as a TaskMessage
-                    async with (
-                        self.streaming_service.streaming_task_message_context(
-                            task_id=task_id,
-                            initial_content=TextContent(
-                                author="agent",
-                                content=rejection_message,
-                            ),
-                        ) as streaming_context
-                    ):
+                    async with self.streaming_service.streaming_task_message_context(
+                        task_id=task_id,
+                        initial_content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                    ) as streaming_context:
                         # Send the full message
                         await streaming_context.stream_update(
                             update=StreamTaskMessageFull(
@@ -1068,7 +1009,7 @@ class OpenAIService:
                                 type="full",
                             ),
                         )
-
+
                     # Re-raise to let the activity handle it
                     raise

@@ -1077,8 +1018,9 @@ class OpenAIService:
                 # Create a copy to avoid modifying set during iteration
                 remaining_items = list(unclosed_item_ids)
                 for item_id in remaining_items:
-                    if (
-
+                    if (
+                        item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
+                    ):  # Check if still unclosed
                         streaming_context = item_id_to_streaming_context[item_id]
                         await streaming_context.close()
                         unclosed_item_ids.discard(item_id)

@@ -1086,9 +1028,7 @@ class OpenAIService:
                 if span:
                     span.output = {
                         "new_items": [
-                            item.raw_item.model_dump()
-                            if isinstance(item.raw_item, BaseModel)
-                            else item.raw_item
+                            item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
                         "final_output": result.final_output,