agentex-sdk 0.4.10__py3-none-any.whl → 0.4.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentex/_base_client.py +3 -3
- agentex/_compat.py +48 -48
- agentex/_models.py +41 -41
- agentex/_types.py +35 -1
- agentex/_utils/__init__.py +9 -2
- agentex/_utils/_compat.py +45 -0
- agentex/_utils/_datetime_parse.py +136 -0
- agentex/_utils/_transform.py +11 -1
- agentex/_utils/_typing.py +6 -1
- agentex/_utils/_utils.py +0 -1
- agentex/_version.py +1 -1
- agentex/lib/adk/_modules/acp.py +15 -3
- agentex/lib/adk/providers/_modules/openai.py +57 -0
- agentex/lib/cli/handlers/deploy_handlers.py +4 -1
- agentex/lib/cli/templates/temporal/README.md.j2 +18 -2
- agentex/lib/cli/templates/temporal/environments.yaml.j2 +1 -1
- agentex/lib/cli/templates/temporal/project/activities.py.j2 +77 -0
- agentex/lib/cli/templates/temporal/project/run_worker.py.j2 +3 -1
- agentex/lib/core/services/adk/acp/acp.py +27 -12
- agentex/lib/core/services/adk/providers/openai.py +272 -29
- agentex/lib/core/temporal/activities/adk/acp/acp_activities.py +6 -0
- agentex/lib/core/temporal/activities/adk/providers/openai_activities.py +451 -69
- agentex/types/reasoning_content_param.py +4 -3
- {agentex_sdk-0.4.10.dist-info → agentex_sdk-0.4.12.dist-info}/METADATA +1 -1
- {agentex_sdk-0.4.10.dist-info → agentex_sdk-0.4.12.dist-info}/RECORD +28 -25
- {agentex_sdk-0.4.10.dist-info → agentex_sdk-0.4.12.dist-info}/WHEEL +0 -0
- {agentex_sdk-0.4.10.dist-info → agentex_sdk-0.4.12.dist-info}/entry_points.txt +0 -0
- {agentex_sdk-0.4.10.dist-info → agentex_sdk-0.4.12.dist-info}/licenses/LICENSE +0 -0
agentex/lib/core/services/adk/providers/openai.py:

```diff
@@ -1,17 +1,18 @@
 # Standard library imports
-import json
 from contextlib import AsyncExitStack, asynccontextmanager
 from typing import Any, Literal
 
 from agents import Agent, Runner, RunResult, RunResultStreaming
 from agents.agent import StopAtTools, ToolsToFinalOutputFunction
+from agents.guardrail import InputGuardrail, OutputGuardrail
+from agents.exceptions import InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered
 from agents.mcp import MCPServerStdio
 from mcp import StdioServerParameters
 from openai.types.responses import (
     ResponseCompletedEvent,
-
+    ResponseFunctionWebSearch,
+    ResponseCodeInterpreterToolCall,
     ResponseOutputItemDoneEvent,
-    ResponseReasoningSummaryPartDoneEvent,
     ResponseTextDeltaEvent,
     ResponseReasoningSummaryTextDeltaEvent,
     ResponseReasoningSummaryTextDoneEvent,
```
```diff
@@ -84,6 +85,86 @@ class OpenAIService:
         self.streaming_service = streaming_service
         self.tracer = tracer
 
+    def _extract_tool_call_info(
+        self, tool_call_item: Any
+    ) -> tuple[str, str, dict[str, Any]]:
+        """
+        Extract call_id, tool_name, and tool_arguments from a tool call item.
+
+        Args:
+            tool_call_item: The tool call item to process
+
+        Returns:
+            A tuple of (call_id, tool_name, tool_arguments)
+        """
+        # Generic handling for different tool call types
+        # Try 'call_id' first, then 'id', then generate placeholder
+        if hasattr(tool_call_item, 'call_id'):
+            call_id = tool_call_item.call_id
+        elif hasattr(tool_call_item, 'id'):
+            call_id = tool_call_item.id
+        else:
+            call_id = f"unknown_call_{id(tool_call_item)}"
+            logger.warning(
+                f"Warning: Tool call item {type(tool_call_item)} has "
+                f"neither 'call_id' nor 'id' attribute, using placeholder: "
+                f"{call_id}"
+            )
+
+        if isinstance(tool_call_item, ResponseFunctionWebSearch):
+            tool_name = "web_search"
+            tool_arguments = {
+                "action": tool_call_item.action.model_dump(),
+                "status": tool_call_item.status
+            }
+        elif isinstance(tool_call_item, ResponseCodeInterpreterToolCall):
+            tool_name = "code_interpreter"
+            tool_arguments = {
+                "code": tool_call_item.code,
+                "status": tool_call_item.status
+            }
+        else:
+            # Generic handling for any tool call type
+            tool_name = getattr(tool_call_item, 'name', type(tool_call_item).__name__)
+            tool_arguments = tool_call_item.model_dump()
+
+        return call_id, tool_name, tool_arguments
+
+    def _extract_tool_response_info(
+        self, tool_call_map: dict[str, Any], tool_output_item: Any
+    ) -> tuple[str, str, str]:
+        """
+        Extract call_id, tool_name, and content from a tool output item.
+
+        Args:
+            tool_call_map: Map of call_ids to tool_call items
+            tool_output_item: The tool output item to process
+
+        Returns:
+            A tuple of (call_id, tool_name, content)
+        """
+        # Extract call_id and content from the tool_output_item
+        # Handle both dictionary access and attribute access
+        if hasattr(tool_output_item, 'get') and callable(tool_output_item.get):
+            # Dictionary-like access
+            call_id = tool_output_item["call_id"]
+            content = tool_output_item["output"]
+        else:
+            # Attribute access for structured objects
+            call_id = getattr(tool_output_item, 'call_id', None)
+            content = getattr(tool_output_item, 'output', None)
+
+        # Get the name from the tool call map using generic approach
+        tool_call = tool_call_map[call_id]
+        if hasattr(tool_call, "name"):
+            tool_name = getattr(tool_call, "name")
+        elif hasattr(tool_call, "type"):
+            tool_name = getattr(tool_call, "type")
+        else:
+            tool_name = type(tool_call).__name__
+
+        return call_id, tool_name, content
+
     async def run_agent(
         self,
         input_list: list[dict[str, Any]],
```
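As a rough illustration of the fallback order these helpers implement, here is a sketch using a hypothetical stand-in type (`FakeToolCall` is not SDK code; real items are `openai.types.responses` models such as `ResponseFunctionWebSearch`):

```python
from dataclasses import dataclass, asdict
from typing import Any

@dataclass
class FakeToolCall:  # hypothetical stand-in for an SDK tool call item
    id: str
    name: str

    def model_dump(self) -> dict[str, Any]:
        return asdict(self)

item = FakeToolCall(id="call_123", name="lookup_weather")
# _extract_tool_call_info(item) would fall back from 'call_id' to 'id'
# for the identifier and, since the type is unrecognized, take the
# generic branch: ("call_123", "lookup_weather", item.model_dump()).
```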
```diff
@@ -104,6 +185,9 @@ class OpenAIService:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResult:
         """
         Run an agent without streaming or TaskMessage creation.
@@ -122,8 +206,14 @@
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
```
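For callers, the new parameters thread straight through to the Agent and Runner. A minimal usage sketch, assuming an already-constructed `OpenAIService` named `service` and the `@input_guardrail` decorator from the openai-agents package; the `rejection_message` key matches what the tripwire handlers later in this diff look for:

```python
from agents import Agent, GuardrailFunctionOutput, RunContextWrapper, input_guardrail

@input_guardrail
async def block_empty_input(
    ctx: RunContextWrapper, agent: Agent, user_input
) -> GuardrailFunctionOutput:
    # output_info can carry a custom rejection_message; the service's
    # tripwire handlers surface it to the user when the guardrail trips.
    return GuardrailFunctionOutput(
        output_info={"rejection_message": "Please ask a non-empty question."},
        tripwire_triggered=not str(user_input).strip(),
    )

result = await service.run_agent(
    input_list=[{"role": "user", "content": "What's 2 + 2?"}],
    # ...agent/model/tool parameters unchanged from 0.4.10...
    input_guardrails=[block_empty_input],
    max_turns=3,  # forwarded to Runner.run only when not None
)
```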
```diff
@@ -145,6 +235,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent")
@@ -152,7 +243,9 @@
             async with mcp_server_context(
                 mcp_server_params, mcp_timeout_seconds
             ) as servers:
-                tools = [
+                tools = [
+                    tool.to_oai_function_tool()for tool in tools
+                ] if tools else []
                 handoffs = (
                     [Agent(**handoff.model_dump()) for handoff in handoffs]
                     if handoffs
@@ -174,11 +267,18 @@
                 agent_kwargs["model_settings"] = (
                     model_settings.to_oai_model_settings()
                 )
+                if input_guardrails is not None:
+                    agent_kwargs["input_guardrails"] = input_guardrails
+                if output_guardrails is not None:
+                    agent_kwargs["output_guardrails"] = output_guardrails
 
                 agent = Agent(**agent_kwargs)
 
                 # Run without streaming
-
+                if max_turns is not None:
+                    result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = await Runner.run(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
```
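Note the pattern repeated in each of the four run variants below: `max_turns` is forwarded to the Runner only when explicitly set, so the openai-agents default turn limit still applies when callers pass `None`, and the guardrail lists are attached to the `Agent` only when provided.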
```diff
@@ -214,6 +314,9 @@
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResult:
         """
         Run an agent with automatic TaskMessage creation.
@@ -234,7 +337,9 @@
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
@@ -262,6 +367,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent auto send")
@@ -290,11 +396,18 @@
                 agent_kwargs["model_settings"] = (
                     model_settings.to_oai_model_settings()
                 )
+                if input_guardrails is not None:
+                    agent_kwargs["input_guardrails"] = input_guardrails
+                if output_guardrails is not None:
+                    agent_kwargs["output_guardrails"] = output_guardrails
 
                 agent = Agent(**agent_kwargs)
 
                 # Run without streaming
-
+                if max_turns is not None:
+                    result = await Runner.run(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = await Runner.run(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
@@ -307,7 +420,7 @@
                         "final_output": result.final_output,
                     }
 
-            tool_call_map: dict[str,
+            tool_call_map: dict[str, Any] = {}
 
             for item in result.new_items:
                 if item.type == "message_output_item":
@@ -331,13 +444,17 @@
                     )
 
                 elif item.type == "tool_call_item":
-
+                    tool_call_item = item.raw_item
+
+                    # Extract tool call information using the helper method
+                    call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
+                    tool_call_map[call_id] = tool_call_item
 
                     tool_request_content = ToolRequestContent(
                         author="agent",
-                        tool_call_id=
-                        name=
-                        arguments=
+                        tool_call_id=call_id,
+                        name=tool_name,
+                        arguments=tool_arguments,
                     )
 
                     # Create tool request using streaming context
@@ -358,11 +475,16 @@
                 elif item.type == "tool_call_output_item":
                     tool_output_item = item.raw_item
 
+                    # Extract tool response information using the helper method
+                    call_id, tool_name, content = self._extract_tool_response_info(
+                        tool_call_map, tool_output_item
+                    )
+
                     tool_response_content = ToolResponseContent(
                         author="agent",
-                        tool_call_id=
-                        name=
-                        content=
+                        tool_call_id=call_id,
+                        name=tool_name,
+                        content=content,
                     )
                     # Create tool response using streaming context
                     async with (
@@ -402,6 +524,9 @@
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled but no TaskMessage creation.
@@ -420,8 +545,14 @@
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
         """
@@ -443,6 +574,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent streamed")
@@ -471,11 +603,18 @@
                 agent_kwargs["model_settings"] = (
                     model_settings.to_oai_model_settings()
                 )
+                if input_guardrails is not None:
+                    agent_kwargs["input_guardrails"] = input_guardrails
+                if output_guardrails is not None:
+                    agent_kwargs["output_guardrails"] = output_guardrails
 
                 agent = Agent(**agent_kwargs)
 
                 # Run with streaming (but no TaskMessage creation)
-
+                if max_turns is not None:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list)
 
                 if span:
                     span.output = {
@@ -511,6 +650,9 @@
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
+        max_turns: int | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled and automatic TaskMessage creation.
@@ -530,7 +672,14 @@
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            max_turns: Maximum number of turns the agent can take. Uses Runner's default if None.
 
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
@@ -540,7 +689,7 @@
         if self.agentex_client is None:
             raise ValueError("Agentex client must be provided for auto_send methods")
 
-        tool_call_map: dict[str,
+        tool_call_map: dict[str, Any] = {}
 
         trace = self.tracer.trace(trace_id)
         redacted_params = redact_mcp_server_params(mcp_server_params)
@@ -561,6 +710,7 @@
                 "tools": tools,
                 "output_type": output_type,
                 "tool_use_behavior": tool_use_behavior,
+                "max_turns": max_turns,
             },
         ) as span:
             heartbeat_if_in_workflow("run agent streamed auto send")
@@ -589,11 +739,18 @@
                 agent_kwargs["model_settings"] = (
                     model_settings.to_oai_model_settings()
                 )
+                if input_guardrails is not None:
+                    agent_kwargs["input_guardrails"] = input_guardrails
+                if output_guardrails is not None:
+                    agent_kwargs["output_guardrails"] = output_guardrails
 
                 agent = Agent(**agent_kwargs)
 
                 # Run with streaming
-
+                if max_turns is not None:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list, max_turns=max_turns)
+                else:
+                    result = Runner.run_streamed(starting_agent=agent, input=input_list)
 
                 item_id_to_streaming_context: dict[
                     str, StreamingTaskMessageContext
@@ -610,13 +767,16 @@
                     if event.type == "run_item_stream_event":
                         if event.item.type == "tool_call_item":
                             tool_call_item = event.item.raw_item
-
+
+                            # Extract tool call information using the helper method
+                            call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
+                            tool_call_map[call_id] = tool_call_item
 
                             tool_request_content = ToolRequestContent(
                                 author="agent",
-                                tool_call_id=
-                                name=
-                                arguments=
+                                tool_call_id=call_id,
+                                name=tool_name,
+                                arguments=tool_arguments,
                             )
 
                             # Create tool request using streaming context (immediate completion)
@@ -638,13 +798,16 @@
                         elif event.item.type == "tool_call_output_item":
                             tool_output_item = event.item.raw_item
 
+                            # Extract tool response information using the helper method
+                            call_id, tool_name, content = self._extract_tool_response_info(
+                                tool_call_map, tool_output_item
+                            )
+
                             tool_response_content = ToolResponseContent(
                                 author="agent",
-                                tool_call_id=
-                                name=
-
-                                ].name,
-                                content=tool_output_item["output"],
+                                tool_call_id=call_id,
+                                name=tool_name,
+                                content=content,
                             )
 
                             # Create tool response using streaming context (immediate completion)
```
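The streamed variants hand back a `RunResultStreaming` whose events the service iterates as shown above. A minimal consumer sketch using only the public openai-agents streaming API (agent name and input are illustrative):

```python
import asyncio
from agents import Agent, Runner

async def main() -> None:
    agent = Agent(name="demo", instructions="Answer briefly.")  # illustrative
    result = Runner.run_streamed(agent, input="What's 2 + 2?")
    async for event in result.stream_events():
        if event.type == "run_item_stream_event":
            if event.item.type == "tool_call_item":
                # Raw item may be a function call, web search, code interpreter, etc.
                print("tool requested:", type(event.item.raw_item).__name__)
            elif event.item.type == "tool_call_output_item":
                print("tool finished")
    print("final output:", result.final_output)

asyncio.run(main())
```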
```diff
@@ -829,6 +992,86 @@
                         await streaming_context.close()
                         unclosed_item_ids.discard(item_id)
 
+        except InputGuardrailTripwireTriggered as e:
+            # Handle guardrail trigger by sending a rejection message
+            rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
+
+            # Try to extract rejection message from the guardrail result
+            if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
+                output_info = getattr(e.guardrail_result.output, 'output_info', {})
+                if isinstance(output_info, dict) and 'rejection_message' in output_info:
+                    rejection_message = output_info['rejection_message']
+                elif hasattr(e.guardrail_result, 'guardrail'):
+                    # Fall back to using guardrail name if no custom message
+                    triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                    if triggered_guardrail_name:
+                        rejection_message = f"I'm sorry, but I cannot process this request. The '{triggered_guardrail_name}' guardrail was triggered."
+
+            # Create and send the rejection message as a TaskMessage
+            async with (
+                self.streaming_service.streaming_task_message_context(
+                    task_id=task_id,
+                    initial_content=TextContent(
+                        author="agent",
+                        content=rejection_message,
+                    ),
+                ) as streaming_context
+            ):
+                # Send the full message
+                await streaming_context.stream_update(
+                    update=StreamTaskMessageFull(
+                        parent_task_message=streaming_context.task_message,
+                        content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                        type="full",
+                    ),
+                )
+
+            # Re-raise to let the activity handle it
+            raise
+
+        except OutputGuardrailTripwireTriggered as e:
+            # Handle output guardrail trigger by sending a rejection message
+            rejection_message = "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question."
+
+            # Try to extract rejection message from the guardrail result
+            if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
+                output_info = getattr(e.guardrail_result.output, 'output_info', {})
+                if isinstance(output_info, dict) and 'rejection_message' in output_info:
+                    rejection_message = output_info['rejection_message']
+                elif hasattr(e.guardrail_result, 'guardrail'):
+                    # Fall back to using guardrail name if no custom message
+                    triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                    if triggered_guardrail_name:
+                        rejection_message = f"I'm sorry, but I cannot provide this response. The '{triggered_guardrail_name}' guardrail was triggered."
+
+            # Create and send the rejection message as a TaskMessage
+            async with (
+                self.streaming_service.streaming_task_message_context(
+                    task_id=task_id,
+                    initial_content=TextContent(
+                        author="agent",
+                        content=rejection_message,
+                    ),
+                ) as streaming_context
+            ):
+                # Send the full message
+                await streaming_context.stream_update(
+                    update=StreamTaskMessageFull(
+                        parent_task_message=streaming_context.task_message,
+                        content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                        type="full",
+                    ),
+                )
+
+            # Re-raise to let the activity handle it
+            raise
+
         finally:
             # Cleanup: ensure all streaming contexts for this session are properly finished
             # Create a copy to avoid modifying set during iteration
```
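Because both handlers re-raise after streaming the rejection message, the tripwire remains visible to the activity layer. A sketch of catching it at a direct call site (the call arguments shown are hypothetical, and `service` is an assumed `OpenAIService` instance):

```python
from agents.exceptions import (
    InputGuardrailTripwireTriggered,
    OutputGuardrailTripwireTriggered,
)

try:
    await service.run_agent_streamed_auto_send(
        input_list=[{"role": "user", "content": "hi"}],
        task_id="task_abc",  # hypothetical identifier
        # ...remaining parameters as before...
    )
except (InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered):
    # The rejection TaskMessage has already been streamed to the task;
    # callers decide whether to retry, fail the activity, or swallow.
    pass
```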
agentex/lib/core/temporal/activities/adk/acp/acp_activities.py:

```diff
@@ -45,6 +45,8 @@ class EventSendParams(BaseModelWithTraceParams):
 class TaskCancelParams(BaseModelWithTraceParams):
     task_id: str | None = None
     task_name: str | None = None
+    agent_id: str | None = None
+    agent_name: str | None = None
 
 
 class ACPActivities:
@@ -83,4 +85,8 @@ class ACPActivities:
         return await self._acp_service.task_cancel(
             task_id=params.task_id,
             task_name=params.task_name,
+            agent_id=params.agent_id,
+            agent_name=params.agent_name,
+            trace_id=params.trace_id,
+            parent_span_id=params.parent_span_id,
         )
```
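On the ACP side, `task_cancel` can now be addressed by agent as well as by task, and the trace fields inherited from `BaseModelWithTraceParams` are forwarded through. A small sketch constructing the params model with only fields visible in this diff (values illustrative):

```python
params = TaskCancelParams(
    task_name="nightly-report",
    agent_name="report-writer",
    trace_id="trace-123",
)
# ACPActivities.task_cancel(params) now forwards agent_id/agent_name and
# the trace context to the underlying ACP service's task_cancel call.
```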