agentex-sdk 0.4.11__py3-none-any.whl → 0.4.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentex/_constants.py +3 -3
- agentex/_version.py +1 -1
- agentex/lib/adk/_modules/acp.py +43 -5
- agentex/lib/adk/providers/_modules/openai.py +15 -0
- agentex/lib/cli/handlers/deploy_handlers.py +4 -1
- agentex/lib/cli/templates/temporal/environments.yaml.j2 +1 -1
- agentex/lib/core/services/adk/acp/acp.py +85 -20
- agentex/lib/core/services/adk/providers/openai.py +149 -25
- agentex/lib/core/temporal/activities/adk/acp/acp_activities.py +20 -0
- agentex/lib/core/temporal/activities/adk/providers/openai_activities.py +265 -149
- agentex/lib/core/temporal/workers/worker.py +23 -2
- agentex/lib/sdk/fastacp/base/base_acp_server.py +22 -2
- agentex/lib/sdk/fastacp/base/constants.py +24 -0
- agentex/lib/types/acp.py +20 -0
- agentex/resources/agents.py +3 -0
- agentex/resources/tasks.py +4 -4
- agentex/types/agent.py +7 -1
- agentex/types/task.py +2 -0
- {agentex_sdk-0.4.11.dist-info → agentex_sdk-0.4.13.dist-info}/METADATA +1 -1
- {agentex_sdk-0.4.11.dist-info → agentex_sdk-0.4.13.dist-info}/RECORD +23 -22
- {agentex_sdk-0.4.11.dist-info → agentex_sdk-0.4.13.dist-info}/WHEEL +0 -0
- {agentex_sdk-0.4.11.dist-info → agentex_sdk-0.4.13.dist-info}/entry_points.txt +0 -0
- {agentex_sdk-0.4.11.dist-info → agentex_sdk-0.4.13.dist-info}/licenses/LICENSE +0 -0
agentex/lib/core/services/adk/providers/openai.py
@@ -1,5 +1,4 @@
 # Standard library imports
-import json
 from contextlib import AsyncExitStack, asynccontextmanager
 from typing import Any, Literal
 
@@ -11,7 +10,8 @@ from agents.mcp import MCPServerStdio
 from mcp import StdioServerParameters
 from openai.types.responses import (
     ResponseCompletedEvent,
-
+    ResponseFunctionWebSearch,
+    ResponseCodeInterpreterToolCall,
     ResponseOutputItemDoneEvent,
     ResponseTextDeltaEvent,
     ResponseReasoningSummaryTextDeltaEvent,
@@ -85,6 +85,86 @@ class OpenAIService:
         self.streaming_service = streaming_service
         self.tracer = tracer
 
+    def _extract_tool_call_info(
+        self, tool_call_item: Any
+    ) -> tuple[str, str, dict[str, Any]]:
+        """
+        Extract call_id, tool_name, and tool_arguments from a tool call item.
+
+        Args:
+            tool_call_item: The tool call item to process
+
+        Returns:
+            A tuple of (call_id, tool_name, tool_arguments)
+        """
+        # Generic handling for different tool call types
+        # Try 'call_id' first, then 'id', then generate placeholder
+        if hasattr(tool_call_item, 'call_id'):
+            call_id = tool_call_item.call_id
+        elif hasattr(tool_call_item, 'id'):
+            call_id = tool_call_item.id
+        else:
+            call_id = f"unknown_call_{id(tool_call_item)}"
+            logger.warning(
+                f"Warning: Tool call item {type(tool_call_item)} has "
+                f"neither 'call_id' nor 'id' attribute, using placeholder: "
+                f"{call_id}"
+            )
+
+        if isinstance(tool_call_item, ResponseFunctionWebSearch):
+            tool_name = "web_search"
+            tool_arguments = {
+                "action": tool_call_item.action.model_dump(),
+                "status": tool_call_item.status
+            }
+        elif isinstance(tool_call_item, ResponseCodeInterpreterToolCall):
+            tool_name = "code_interpreter"
+            tool_arguments = {
+                "code": tool_call_item.code,
+                "status": tool_call_item.status
+            }
+        else:
+            # Generic handling for any tool call type
+            tool_name = getattr(tool_call_item, 'name', type(tool_call_item).__name__)
+            tool_arguments = tool_call_item.model_dump()
+
+        return call_id, tool_name, tool_arguments
+
+    def _extract_tool_response_info(
+        self, tool_call_map: dict[str, Any], tool_output_item: Any
+    ) -> tuple[str, str, str]:
+        """
+        Extract call_id, tool_name, and content from a tool output item.
+
+        Args:
+            tool_call_map: Map of call_ids to tool_call items
+            tool_output_item: The tool output item to process
+
+        Returns:
+            A tuple of (call_id, tool_name, content)
+        """
+        # Extract call_id and content from the tool_output_item
+        # Handle both dictionary access and attribute access
+        if hasattr(tool_output_item, 'get') and callable(tool_output_item.get):
+            # Dictionary-like access
+            call_id = tool_output_item["call_id"]
+            content = tool_output_item["output"]
+        else:
+            # Attribute access for structured objects
+            call_id = getattr(tool_output_item, 'call_id', None)
+            content = getattr(tool_output_item, 'output', None)
+
+        # Get the name from the tool call map using generic approach
+        tool_call = tool_call_map[call_id]
+        if hasattr(tool_call, "name"):
+            tool_name = getattr(tool_call, "name")
+        elif hasattr(tool_call, "type"):
+            tool_name = getattr(tool_call, "type")
+        else:
+            tool_name = type(tool_call).__name__
+
+        return call_id, tool_name, content
+
     async def run_agent(
         self,
         input_list: list[dict[str, Any]],
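The new `_extract_tool_call_info` / `_extract_tool_response_info` helpers normalize the different raw tool-call shapes (function calls, web search, code interpreter) into plain `(call_id, name, payload)` tuples, falling back from `call_id` to `id` to a synthesized placeholder. Below is a minimal, self-contained sketch of that fallback pattern; the `Fake*` classes and `extract_tool_call_info` are hypothetical stand-ins, not the real `openai.types.responses` models or the SDK's helper.

from dataclasses import dataclass, field
from typing import Any


@dataclass
class FakeFunctionCall:
    """Hypothetical stand-in for a raw function tool call (has call_id and name)."""
    call_id: str
    name: str
    arguments: dict[str, Any] = field(default_factory=dict)


@dataclass
class FakeWebSearchCall:
    """Hypothetical stand-in for a built-in tool call that only exposes `id`."""
    id: str
    status: str = "completed"


def extract_tool_call_info(item: Any) -> tuple[str, str, dict[str, Any]]:
    # Fallback chain: prefer call_id, then id, then a synthesized placeholder.
    call_id = getattr(item, "call_id", None) or getattr(item, "id", None) or f"unknown_call_{id(item)}"
    # Prefer an explicit name; otherwise report the concrete type so the
    # resulting tool request still gets a meaningful label.
    tool_name = getattr(item, "name", type(item).__name__)
    tool_arguments = getattr(item, "arguments", None) or {}
    return call_id, tool_name, tool_arguments


print(extract_tool_call_info(FakeFunctionCall("call_1", "get_weather", {"city": "SF"})))
print(extract_tool_call_info(FakeWebSearchCall("ws_1")))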
@@ -107,6 +187,7 @@ class OpenAIService:
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResult:
         """
         Run an agent without streaming or TaskMessage creation.
@@ -131,6 +212,8 @@
                 initial user input.
             output_guardrails: Optional list of output guardrails to run on
                 final agent output.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
@@ -152,6 +235,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent")
@@ -159,7 +243,9 @@
             async with mcp_server_context(
                 mcp_server_params, mcp_timeout_seconds
             ) as servers:
-                tools = [
+                tools = [
+                    tool.to_oai_function_tool()for tool in tools
+                ] if tools else []
                 handoffs = (
                     [Agent(**handoff.model_dump()) for handoff in handoffs]
                     if handoffs
@@ -189,7 +275,10 @@
                 agent = Agent(**agent_kwargs)
 
                 # Run without streaming
-
+                if max_turns is not None:
+                    result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = await Runner.run(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
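All four run methods in 0.4.13 forward `max_turns` to the Agents SDK runner only when the caller sets it, so the runner's own default cap still applies otherwise. A reduced sketch of that branch follows; `fake_runner_run` and `run_agent` here are stand-ins, not the real `Runner.run` or the service method.

import asyncio
from typing import Any


async def fake_runner_run(*, starting_agent: str, input: list[dict[str, Any]], max_turns: int = 10) -> str:
    # Stand-in for agents.Runner.run; the real call lives in the openai-agents SDK.
    return f"ran {starting_agent} with max_turns={max_turns}"


async def run_agent(input_list: list[dict[str, Any]], max_turns: int | None = None) -> str:
    # Forward max_turns only when the caller set it, so the runner's own
    # default cap applies otherwise (the same branch the diff adds).
    if max_turns is not None:
        return await fake_runner_run(starting_agent="demo-agent", input=input_list, max_turns=max_turns)
    return await fake_runner_run(starting_agent="demo-agent", input=input_list)


messages = [{"role": "user", "content": "hi"}]
print(asyncio.run(run_agent(messages)))               # uses the runner default
print(asyncio.run(run_agent(messages, max_turns=3)))  # explicit cap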
@@ -227,6 +316,7 @@
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResult:
         """
         Run an agent with automatic TaskMessage creation.
@@ -249,6 +339,7 @@
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
             input_guardrails: Optional list of input guardrails to run on initial user input.
             output_guardrails: Optional list of output guardrails to run on final agent output.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
@@ -276,6 +367,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent auto send")
@@ -312,7 +404,10 @@
                 agent = Agent(**agent_kwargs)
 
                 # Run without streaming
-
+                if max_turns is not None:
+                    result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = await Runner.run(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
@@ -325,7 +420,7 @@
                         "final_output": result.final_output,
                     }
 
-                tool_call_map: dict[str,
+                tool_call_map: dict[str, Any] = {}
 
                 for item in result.new_items:
                     if item.type == "message_output_item":
@@ -349,13 +444,17 @@
                         )
 
                     elif item.type == "tool_call_item":
-
+                        tool_call_item = item.raw_item
+
+                        # Extract tool call information using the helper method
+                        call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
+                        tool_call_map[call_id] = tool_call_item
 
                         tool_request_content = ToolRequestContent(
                             author="agent",
-                            tool_call_id=
-                            name=
-                            arguments=
+                            tool_call_id=call_id,
+                            name=tool_name,
+                            arguments=tool_arguments,
                         )
 
                         # Create tool request using streaming context
@@ -376,11 +475,16 @@
                     elif item.type == "tool_call_output_item":
                         tool_output_item = item.raw_item
 
+                        # Extract tool response information using the helper method
+                        call_id, tool_name, content = self._extract_tool_response_info(
+                            tool_call_map, tool_output_item
+                        )
+
                         tool_response_content = ToolResponseContent(
                             author="agent",
-                            tool_call_id=
-                            name=
-                            content=
+                            tool_call_id=call_id,
+                            name=tool_name,
+                            content=content,
                         )
                         # Create tool response using streaming context
                         async with (
@@ -422,6 +526,7 @@
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled but no TaskMessage creation.
@@ -446,6 +551,8 @@
                 initial user input.
             output_guardrails: Optional list of output guardrails to run on
                 final agent output.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
         """
@@ -467,6 +574,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent streamed")
@@ -503,7 +611,10 @@
                 agent = Agent(**agent_kwargs)
 
                 # Run with streaming (but no TaskMessage creation)
-
+                if max_turns is not None:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
@@ -541,6 +652,7 @@
         mcp_timeout_seconds: int | None = None,
         input_guardrails: list[InputGuardrail] | None = None,
         output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled and automatic TaskMessage creation.
@@ -566,6 +678,8 @@
                 initial user input.
             output_guardrails: Optional list of output guardrails to run on
                 final agent output.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
 
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
@@ -575,7 +689,7 @@
         if self.agentex_client is None:
             raise ValueError("Agentex client must be provided for auto_send methods")
 
-        tool_call_map: dict[str,
+        tool_call_map: dict[str, Any] = {}
 
         trace = self.tracer.trace(trace_id)
         redacted_params = redact_mcp_server_params(mcp_server_params)
@@ -596,6 +710,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent streamed auto send")
@@ -632,7 +747,10 @@
                 agent = Agent(**agent_kwargs)
 
                 # Run with streaming
-
+                if max_turns is not None:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list)
 
                 item_id_to_streaming_context: dict[
                     str, StreamingTaskMessageContext
@@ -649,13 +767,16 @@
                     if event.type == "run_item_stream_event":
                         if event.item.type == "tool_call_item":
                             tool_call_item = event.item.raw_item
-
+
+                            # Extract tool call information using the helper method
+                            call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
+                            tool_call_map[call_id] = tool_call_item
 
                             tool_request_content = ToolRequestContent(
                                 author="agent",
-                                tool_call_id=
-                                name=
-                                arguments=
+                                tool_call_id=call_id,
+                                name=tool_name,
+                                arguments=tool_arguments,
                             )
 
                             # Create tool request using streaming context (immediate completion)
@@ -677,13 +798,16 @@
                         elif event.item.type == "tool_call_output_item":
                             tool_output_item = event.item.raw_item
 
+                            # Extract tool response information using the helper method
+                            call_id, tool_name, content = self._extract_tool_response_info(
+                                tool_call_map, tool_output_item
+                            )
+
                             tool_response_content = ToolResponseContent(
                                 author="agent",
-                                tool_call_id=
-                                name=
-
-                                ].name,
-                                content=tool_output_item["output"],
+                                tool_call_id=call_id,
+                                name=tool_name,
+                                content=content,
                             )
 
                             # Create tool response using streaming context (immediate completion)
agentex/lib/core/temporal/activities/adk/acp/acp_activities.py
@@ -26,6 +26,7 @@ class TaskCreateParams(BaseModelWithTraceParams):
     agent_id: str | None = None
     agent_name: str | None = None
     params: dict[str, Any] | None = None
+    request: dict[str, Any] | None = None
 
 
 class MessageSendParams(BaseModelWithTraceParams):
@@ -33,6 +34,7 @@ class MessageSendParams(BaseModelWithTraceParams):
     agent_name: str | None = None
     task_id: str | None = None
     content: TaskMessageContent
+    request: dict[str, Any] | None = None
 
 
 class EventSendParams(BaseModelWithTraceParams):
@@ -40,11 +42,15 @@ class EventSendParams(BaseModelWithTraceParams):
     agent_name: str | None = None
     task_id: str | None = None
     content: TaskMessageContent
+    request: dict[str, Any] | None = None
 
 
 class TaskCancelParams(BaseModelWithTraceParams):
     task_id: str | None = None
     task_name: str | None = None
+    agent_id: str | None = None
+    agent_name: str | None = None
+    request: dict[str, Any] | None = None
 
 
 class ACPActivities:
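The ACP activity parameter models gain an optional `request` payload, and `TaskCancelParams` can now also address the agent by id or name. A minimal Pydantic sketch of the resulting shape is below; the simplified `BaseModelWithTraceParams` here is a stand-in, assumed only to carry the `trace_id` and `parent_span_id` fields the activities forward.

from typing import Any

from pydantic import BaseModel


class BaseModelWithTraceParams(BaseModel):
    # Simplified stand-in for the SDK base class: assumed to carry the
    # tracing fields the activities forward.
    trace_id: str | None = None
    parent_span_id: str | None = None


class TaskCancelParams(BaseModelWithTraceParams):
    task_id: str | None = None
    task_name: str | None = None
    agent_id: str | None = None
    agent_name: str | None = None
    request: dict[str, Any] | None = None


params = TaskCancelParams(
    task_name="demo-task",
    agent_name="demo-agent",
    request={"reason": "user cancelled"},
)
print(params.model_dump(exclude_none=True))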
@@ -58,6 +64,9 @@ class ACPActivities:
             agent_id=params.agent_id,
             agent_name=params.agent_name,
             params=params.params,
+            trace_id=params.trace_id,
+            parent_span_id=params.parent_span_id,
+            request=params.request,
         )
 
     @activity.defn(name=ACPActivityName.MESSAGE_SEND)
@@ -67,6 +76,9 @@ class ACPActivities:
             agent_name=params.agent_name,
             task_id=params.task_id,
             content=params.content,
+            trace_id=params.trace_id,
+            parent_span_id=params.parent_span_id,
+            request=params.request,
         )
 
     @activity.defn(name=ACPActivityName.EVENT_SEND)
@@ -76,6 +88,9 @@ class ACPActivities:
             agent_name=params.agent_name,
             task_id=params.task_id,
             content=params.content,
+            trace_id=params.trace_id,
+            parent_span_id=params.parent_span_id,
+            request=params.request,
         )
 
     @activity.defn(name=ACPActivityName.TASK_CANCEL)
@@ -83,4 +98,9 @@ class ACPActivities:
         return await self._acp_service.task_cancel(
             task_id=params.task_id,
             task_name=params.task_name,
+            agent_id=params.agent_id,
+            agent_name=params.agent_name,
+            trace_id=params.trace_id,
+            parent_span_id=params.parent_span_id,
+            request=params.request,
        )
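Each activity now passes the tracing context (`trace_id`, `parent_span_id`) and the raw `request` through to the ACP service instead of dropping them at the activity boundary. A compact sketch of that pass-through pattern, using a dummy service and a plain dict in place of the real `acp_service` and params models:

import asyncio
from typing import Any


class DummyACPService:
    # Stand-in for the real ACP service client.
    async def task_cancel(self, **kwargs: Any) -> dict[str, Any]:
        return {"cancelled": True, "received": kwargs}


class ACPActivitiesSketch:
    def __init__(self, acp_service: DummyACPService) -> None:
        self._acp_service = acp_service

    async def task_cancel(self, params: dict[str, Any]) -> dict[str, Any]:
        # Forward identity, tracing context, and the raw request unchanged.
        return await self._acp_service.task_cancel(
            task_id=params.get("task_id"),
            task_name=params.get("task_name"),
            agent_id=params.get("agent_id"),
            agent_name=params.get("agent_name"),
            trace_id=params.get("trace_id"),
            parent_span_id=params.get("parent_span_id"),
            request=params.get("request"),
        )


activities = ACPActivitiesSketch(DummyACPService())
print(asyncio.run(activities.task_cancel({"task_name": "demo", "trace_id": "t-123"})))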