agentex-sdk 0.4.9__py3-none-any.whl → 0.4.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,18 +16,20 @@ from ._utils import (
     lru_cache,
     is_mapping,
     is_iterable,
+    is_sequence,
 )
 from .._files import is_base64_file_input
+from ._compat import get_origin, is_typeddict
 from ._typing import (
     is_list_type,
     is_union_type,
     extract_type_arg,
     is_iterable_type,
     is_required_type,
+    is_sequence_type,
     is_annotated_type,
     strip_annotated_type,
 )
-from .._compat import get_origin, model_dump, is_typeddict
 
 _T = TypeVar("_T")
 
@@ -167,6 +169,8 @@ def _transform_recursive(
 
        Defaults to the same value as the `annotation` argument.
     """
+    from .._compat import model_dump
+
     if inner_type is None:
         inner_type = annotation
 
@@ -184,6 +188,8 @@ def _transform_recursive(
         (is_list_type(stripped_type) and is_list(data))
         # Iterable[T]
         or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+        # Sequence[T]
+        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
     ):
         # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
         # intended as an iterable, so we don't transform it.
@@ -329,6 +335,8 @@ async def _async_transform_recursive(
 
        Defaults to the same value as the `annotation` argument.
     """
+    from .._compat import model_dump
+
     if inner_type is None:
         inner_type = annotation
 
@@ -346,6 +354,8 @@ async def _async_transform_recursive(
         (is_list_type(stripped_type) and is_list(data))
         # Iterable[T]
         or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+        # Sequence[T]
+        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
     ):
         # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
         # intended as an iterable, so we don't transform it.
agentex/_utils/_typing.py CHANGED
@@ -15,7 +15,7 @@ from typing_extensions import (
 
 from ._utils import lru_cache
 from .._types import InheritsGeneric
-from .._compat import is_union as _is_union
+from ._compat import is_union as _is_union
 
 
 def is_annotated_type(typ: type) -> bool:
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
     return (get_origin(typ) or typ) == list
 
 
+def is_sequence_type(typ: type) -> bool:
+    origin = get_origin(typ) or typ
+    return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
+
+
 def is_iterable_type(typ: type) -> bool:
     """If the given type is `typing.Iterable[T]`"""
     origin = get_origin(typ) or typ
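
The new `is_sequence_type` helper, together with the `is_sequence` runtime check wired into `_transform.py` above, lets the transform layer handle `Sequence[T]` annotations the same way it already handles `list` and `Iterable`. A minimal standalone sketch of the same origin check, written against the stdlib rather than the SDK's private aliases:

```python
# Illustrative sketch only -- a standalone re-implementation of the check,
# not the SDK's internal code (which uses its own get_origin/_c_abc aliases).
import collections.abc
import typing

import typing_extensions
from typing_extensions import get_origin


def is_sequence_type(typ: object) -> bool:
    origin = get_origin(typ) or typ
    return origin in (typing_extensions.Sequence, typing.Sequence, collections.abc.Sequence)


assert is_sequence_type(typing.Sequence[int])       # typing alias resolves to abc.Sequence
assert is_sequence_type(collections.abc.Sequence)   # bare ABC
assert not is_sequence_type(typing.List[int])       # lists keep the dedicated is_list_type check
assert not is_sequence_type(str)                    # str data is also excluded at transform time
```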
agentex/_utils/_utils.py CHANGED
@@ -22,7 +22,6 @@ from typing_extensions import TypeGuard
 import sniffio
 
 from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
-from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
 
 _T = TypeVar("_T")
 _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
agentex/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "agentex"
-__version__ = "0.4.9"  # x-release-please-version
+__version__ = "0.4.11"  # x-release-please-version
@@ -4,6 +4,7 @@ from typing import Any, Literal
 from agentex.lib.adk.utils._modules.client import create_async_agentex_client
 from agents import Agent, RunResult, RunResultStreaming
 from agents.agent import StopAtTools, ToolsToFinalOutputFunction
+from agents.guardrail import InputGuardrail, OutputGuardrail
 from agents.agent_output import AgentOutputSchemaBase
 from agents.model_settings import ModelSettings
 from agents.tool import Tool
@@ -84,6 +85,9 @@ class OpenAIModule:
             | StopAtTools
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
+        mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> SerializableRunResult | RunResult:
         """
         Run an agent without streaming or TaskMessage creation.
@@ -107,6 +111,9 @@ class OpenAIModule:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
 
         Returns:
             Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise.
@@ -126,6 +133,9 @@ class OpenAIModule:
                 tools=tools,
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
+                mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
             return await ActivityHelpers.execute_activity(
                 activity_name=OpenAIActivityName.RUN_AGENT,
@@ -150,6 +160,9 @@ class OpenAIModule:
                 tools=tools,
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
+                mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
 
     async def run_agent_auto_send(
@@ -175,6 +188,9 @@ class OpenAIModule:
             | StopAtTools
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
+        mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> SerializableRunResult | RunResult:
         """
         Run an agent with automatic TaskMessage creation.
@@ -197,6 +213,9 @@ class OpenAIModule:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
 
         Returns:
             Union[SerializableRunResult, RunResult]: SerializableRunResult when in Temporal, RunResult otherwise.
@@ -217,6 +236,9 @@ class OpenAIModule:
                 tools=tools,
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
+                mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
             return await ActivityHelpers.execute_activity(
                 activity_name=OpenAIActivityName.RUN_AGENT_AUTO_SEND,
@@ -242,6 +264,9 @@ class OpenAIModule:
                 tools=tools,
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
+                mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
 
     async def run_agent_streamed(
@@ -263,6 +288,9 @@ class OpenAIModule:
             | StopAtTools
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
+        mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled but no TaskMessage creation.
@@ -289,6 +317,9 @@ class OpenAIModule:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
+            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
 
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
@@ -318,6 +349,9 @@ class OpenAIModule:
                 tools=tools,
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
+                mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
 
     async def run_agent_streamed_auto_send(
@@ -344,6 +378,8 @@ class OpenAIModule:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> SerializableRunResultStreaming | RunResultStreaming:
         """
         Run an agent with streaming enabled and automatic TaskMessage creation.
@@ -364,6 +400,8 @@ class OpenAIModule:
             model: Optional model to use.
             model_settings: Optional model settings.
             tools: Optional list of tools.
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
@@ -388,6 +426,8 @@ class OpenAIModule:
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
                 mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
             return await ActivityHelpers.execute_activity(
                 activity_name=OpenAIActivityName.RUN_AGENT_STREAMED_AUTO_SEND,
@@ -414,4 +454,6 @@ class OpenAIModule:
                 output_type=output_type,
                 tool_use_behavior=tool_use_behavior,
                 mcp_timeout_seconds=mcp_timeout_seconds,
+                input_guardrails=input_guardrails,
+                output_guardrails=output_guardrails,
             )
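
The guardrail parameters are new on all four `run_agent*` methods (and `mcp_timeout_seconds` is now exposed on the variants that previously lacked it); they are forwarded unchanged to the service layer and, per the `OpenAIService` hunks below, into the underlying `Agent`. A hedged sketch of the kind of object these parameters accept, built with the OpenAI Agents SDK's decorator API; the guardrail name and logic below are invented for illustration:

```python
# Hedged sketch: a tripwire-style input guardrail defined with the OpenAI
# Agents SDK. Objects like this are what the new `input_guardrails` /
# `output_guardrails` parameters expect; this specific guardrail is made up.
from agents import Agent, GuardrailFunctionOutput, input_guardrail


@input_guardrail
async def block_refund_requests(ctx, agent, user_input) -> GuardrailFunctionOutput:
    flagged = "refund" in str(user_input).lower()
    return GuardrailFunctionOutput(
        # The service's tripwire handler (see the OpenAIService hunk further
        # down) looks for a `rejection_message` key in output_info.
        output_info={"rejection_message": "I can't help with refund requests."},
        tripwire_triggered=flagged,
    )


# The decorated function is an InputGuardrail instance, so it can be attached
# directly to an Agent or passed via run_agent(..., input_guardrails=[...]).
guarded_agent = Agent(
    name="support-agent",
    instructions="Answer product questions only.",
    input_guardrails=[block_refund_requests],
)
```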
@@ -15,8 +15,10 @@ RUN apt-get update && apt-get install -y \
     gcc \
     cmake \
     netcat-openbsd \
+    nodejs \
+    npm \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/**
 
 RUN uv pip install --system --upgrade pip setuptools wheel
 
@@ -15,8 +15,10 @@ RUN apt-get update && apt-get install -y \
     gcc \
     cmake \
     netcat-openbsd \
+    nodejs \
+    npm \
     && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/**
 
 RUN uv pip install --system --upgrade pip setuptools wheel
 
@@ -15,8 +15,10 @@ RUN apt-get update && apt-get install -y \
     gcc \
     cmake \
     netcat-openbsd \
+    nodejs \
+    npm \
    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+    && rm -rf /var/lib/apt/lists/**
 
 # Install tctl (Temporal CLI)
 RUN curl -L https://github.com/temporalio/tctl/releases/download/v1.18.1/tctl_1.18.1_linux_arm64.tar.gz -o /tmp/tctl.tar.gz && \
@@ -262,14 +262,30 @@ class MyWorkflow(BaseWorkflow):
 ```
 
 ### Custom Activities
-Add custom activities for external operations:
+Add custom activities for external operations. **Important**: Always specify appropriate timeouts (recommended: 10 minutes):
 
 ```python
 # In project/activities.py
-@activity.defn
+from datetime import timedelta
+from temporalio import activity
+from temporalio.common import RetryPolicy
+
+@activity.defn(name="call_external_api")
 async def call_external_api(data):
     # HTTP requests, database operations, etc.
     pass
+
+# In your workflow, call it with a timeout:
+result = await workflow.execute_activity(
+    "call_external_api",
+    data,
+    start_to_close_timeout=timedelta(minutes=10),  # Recommended: 10 minute timeout
+    heartbeat_timeout=timedelta(minutes=1),  # Optional: heartbeat monitoring
+    retry_policy=RetryPolicy(maximum_attempts=3)  # Optional: retry policy
+)
+
+# Don't forget to register your custom activities in run_worker.py:
+# all_activities = get_all_activities() + [your_custom_activity_function]
 ```
 
 ### Integration with External Services
@@ -0,0 +1,77 @@
+"""
+Custom Temporal Activities Template
+====================================
+This file is for defining custom Temporal activities that can be executed
+by your workflow. Activities are used for:
+- External API calls
+- Database operations
+- File I/O operations
+- Heavy computations
+- Any non-deterministic operations
+
+IMPORTANT: All activities should have appropriate timeouts!
+Default recommendation: start_to_close_timeout=timedelta(minutes=10)
+"""
+
+from datetime import timedelta
+from typing import Any, Dict
+
+from pydantic import BaseModel
+from temporalio import activity
+from temporalio.common import RetryPolicy
+
+from agentex.lib.utils.logging import make_logger
+
+logger = make_logger(__name__)
+
+
+# Example activity parameter models
+class ExampleActivityParams(BaseModel):
+    """Parameters for the example activity"""
+    data: Dict[str, Any]
+    task_id: str
+
+
+# Example custom activity
+@activity.defn(name="example_custom_activity")
+async def example_custom_activity(params: ExampleActivityParams) -> Dict[str, Any]:
+    """
+    Example custom activity that demonstrates best practices.
+
+    When calling this activity from your workflow, use:
+    ```python
+    result = await workflow.execute_activity(
+        "example_custom_activity",
+        ExampleActivityParams(data={"key": "value"}, task_id=task_id),
+        start_to_close_timeout=timedelta(minutes=10),  # Recommended: 10 minute timeout
+        heartbeat_timeout=timedelta(minutes=1),  # Optional: heartbeat every minute
+        retry_policy=RetryPolicy(maximum_attempts=3)  # Optional: retry up to 3 times
+    )
+    ```
+    """
+    logger.info(f"Processing activity for task {params.task_id} with data: {params.data}")
+
+    # Your activity logic here
+    # This could be:
+    # - API calls
+    # - Database operations
+    # - File processing
+    # - ML model inference
+    # - etc.
+
+    result = {
+        "status": "success",
+        "processed_data": params.data,
+        "task_id": params.task_id
+    }
+
+    return result
+
+
+# Add more custom activities below as needed
+# Remember to:
+# 1. Use appropriate timeouts (default: 10 minutes)
+# 2. Define clear parameter models with Pydantic
+# 3. Handle errors appropriately
+# 4. Use logging for debugging
+# 5. Keep activities focused on a single responsibility
@@ -22,13 +22,15 @@ async def main():
     if task_queue_name is None:
         raise ValueError("WORKFLOW_TASK_QUEUE is not set")
 
+    all_activities = get_all_activities() + []  # add your own activities here
+
     # Create a worker with automatic tracing
     worker = AgentexWorker(
         task_queue=task_queue_name,
     )
 
     await worker.run(
-        activities=get_all_activities(),
+        activities=all_activities,
         workflow={{ workflow_class }},
     )
 
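The regenerated `run_worker` template now funnels activity registration through `all_activities`, so activities defined in the new template file plug in with a one-line change. A hedged sketch of the intended wiring, assuming the template's `example_custom_activity` lives at `project/activities.py` (the path used in the README example above):

```python
# Sketch of run_worker.py wiring; the import path is an assumption based on
# the README's "In project/activities.py" example, not taken from the package.
from project.activities import example_custom_activity

all_activities = get_all_activities() + [example_custom_activity]

await worker.run(
    activities=all_activities,
    workflow={{ workflow_class }},
)
```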
@@ -5,13 +5,14 @@ from typing import Any, Literal
 
 from agents import Agent, Runner, RunResult, RunResultStreaming
 from agents.agent import StopAtTools, ToolsToFinalOutputFunction
+from agents.guardrail import InputGuardrail, OutputGuardrail
+from agents.exceptions import InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered
 from agents.mcp import MCPServerStdio
 from mcp import StdioServerParameters
 from openai.types.responses import (
     ResponseCompletedEvent,
     ResponseFunctionToolCall,
     ResponseOutputItemDoneEvent,
-    ResponseReasoningSummaryPartDoneEvent,
     ResponseTextDeltaEvent,
     ResponseReasoningSummaryTextDeltaEvent,
     ResponseReasoningSummaryTextDoneEvent,
@@ -104,6 +105,8 @@ class OpenAIService:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> RunResult:
         """
         Run an agent without streaming or TaskMessage creation.
@@ -122,8 +125,12 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
@@ -174,6 +181,10 @@ class OpenAIService:
             agent_kwargs["model_settings"] = (
                 model_settings.to_oai_model_settings()
             )
+        if input_guardrails is not None:
+            agent_kwargs["input_guardrails"] = input_guardrails
+        if output_guardrails is not None:
+            agent_kwargs["output_guardrails"] = output_guardrails
 
         agent = Agent(**agent_kwargs)
 
@@ -214,6 +225,8 @@ class OpenAIService:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> RunResult:
         """
         Run an agent with automatic TaskMessage creation.
@@ -234,7 +247,8 @@ class OpenAIService:
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
             mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            input_guardrails: Optional list of input guardrails to run on initial user input.
+            output_guardrails: Optional list of output guardrails to run on final agent output.
         Returns:
             SerializableRunResult: The result of the agent run.
         """
@@ -290,6 +304,10 @@ class OpenAIService:
             agent_kwargs["model_settings"] = (
                 model_settings.to_oai_model_settings()
             )
+        if input_guardrails is not None:
+            agent_kwargs["input_guardrails"] = input_guardrails
+        if output_guardrails is not None:
+            agent_kwargs["output_guardrails"] = output_guardrails
 
         agent = Agent(**agent_kwargs)
 
@@ -402,6 +420,8 @@ class OpenAIService:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled but no TaskMessage creation.
@@ -420,8 +440,12 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
-
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
         """
@@ -471,6 +495,10 @@ class OpenAIService:
             agent_kwargs["model_settings"] = (
                 model_settings.to_oai_model_settings()
             )
+        if input_guardrails is not None:
+            agent_kwargs["input_guardrails"] = input_guardrails
+        if output_guardrails is not None:
+            agent_kwargs["output_guardrails"] = output_guardrails
 
         agent = Agent(**agent_kwargs)
 
@@ -511,6 +539,8 @@ class OpenAIService:
             | ToolsToFinalOutputFunction
         ) = "run_llm_again",
         mcp_timeout_seconds: int | None = None,
+        input_guardrails: list[InputGuardrail] | None = None,
+        output_guardrails: list[OutputGuardrail] | None = None,
     ) -> RunResultStreaming:
         """
         Run an agent with streaming enabled and automatic TaskMessage creation.
@@ -530,7 +560,12 @@ class OpenAIService:
             tools: Optional list of tools.
             output_type: Optional output type.
             tool_use_behavior: Optional tool use behavior.
-            mcp_timeout_seconds: Optional param to set the timeout threshold for the MCP servers. Defaults to 5 seconds.
+            mcp_timeout_seconds: Optional param to set the timeout threshold
+                for the MCP servers. Defaults to 5 seconds.
+            input_guardrails: Optional list of input guardrails to run on
+                initial user input.
+            output_guardrails: Optional list of output guardrails to run on
+                final agent output.
 
         Returns:
             RunResultStreaming: The result of the agent run with streaming.
@@ -589,6 +624,10 @@ class OpenAIService:
             agent_kwargs["model_settings"] = (
                 model_settings.to_oai_model_settings()
             )
+        if input_guardrails is not None:
+            agent_kwargs["input_guardrails"] = input_guardrails
+        if output_guardrails is not None:
+            agent_kwargs["output_guardrails"] = output_guardrails
 
         agent = Agent(**agent_kwargs)
 
@@ -829,6 +868,86 @@ class OpenAIService:
                             await streaming_context.close()
                             unclosed_item_ids.discard(item_id)
 
+        except InputGuardrailTripwireTriggered as e:
+            # Handle guardrail trigger by sending a rejection message
+            rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
+
+            # Try to extract rejection message from the guardrail result
+            if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
+                output_info = getattr(e.guardrail_result.output, 'output_info', {})
+                if isinstance(output_info, dict) and 'rejection_message' in output_info:
+                    rejection_message = output_info['rejection_message']
+                elif hasattr(e.guardrail_result, 'guardrail'):
+                    # Fall back to using guardrail name if no custom message
+                    triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                    if triggered_guardrail_name:
+                        rejection_message = f"I'm sorry, but I cannot process this request. The '{triggered_guardrail_name}' guardrail was triggered."
+
+            # Create and send the rejection message as a TaskMessage
+            async with (
+                self.streaming_service.streaming_task_message_context(
+                    task_id=task_id,
+                    initial_content=TextContent(
+                        author="agent",
+                        content=rejection_message,
+                    ),
+                ) as streaming_context
+            ):
+                # Send the full message
+                await streaming_context.stream_update(
+                    update=StreamTaskMessageFull(
+                        parent_task_message=streaming_context.task_message,
+                        content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                        type="full",
+                    ),
+                )
+
+            # Re-raise to let the activity handle it
+            raise
+
+        except OutputGuardrailTripwireTriggered as e:
+            # Handle output guardrail trigger by sending a rejection message
+            rejection_message = "I'm sorry, but I cannot provide this response due to a guardrail. Please try a different question."
+
+            # Try to extract rejection message from the guardrail result
+            if hasattr(e, 'guardrail_result') and hasattr(e.guardrail_result, 'output'):
+                output_info = getattr(e.guardrail_result.output, 'output_info', {})
+                if isinstance(output_info, dict) and 'rejection_message' in output_info:
+                    rejection_message = output_info['rejection_message']
+                elif hasattr(e.guardrail_result, 'guardrail'):
+                    # Fall back to using guardrail name if no custom message
+                    triggered_guardrail_name = getattr(e.guardrail_result.guardrail, 'name', None)
+                    if triggered_guardrail_name:
+                        rejection_message = f"I'm sorry, but I cannot provide this response. The '{triggered_guardrail_name}' guardrail was triggered."
+
+            # Create and send the rejection message as a TaskMessage
+            async with (
+                self.streaming_service.streaming_task_message_context(
+                    task_id=task_id,
+                    initial_content=TextContent(
+                        author="agent",
+                        content=rejection_message,
+                    ),
+                ) as streaming_context
+            ):
+                # Send the full message
+                await streaming_context.stream_update(
+                    update=StreamTaskMessageFull(
+                        parent_task_message=streaming_context.task_message,
+                        content=TextContent(
+                            author="agent",
+                            content=rejection_message,
+                        ),
+                        type="full",
+                    ),
+                )
+
+            # Re-raise to let the activity handle it
+            raise
+
         finally:
             # Cleanup: ensure all streaming contexts for this session are properly finished
             # Create a copy to avoid modifying set during iteration
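
When a guardrail trips, the service streams a rejection `TaskMessage` (preferring a `rejection_message` supplied in the guardrail's `output_info`, otherwise falling back to the guardrail name) and then re-raises, so callers still see the exception. A hedged sketch of what handling that re-raised exception could look like on the caller side; the wrapper function and the choice to convert the error are illustrative, not part of the package:

```python
# Illustrative caller-side handling of the re-raised tripwire exceptions.
# Only the exception classes come from the diff; everything else is assumed.
from agents.exceptions import (
    InputGuardrailTripwireTriggered,
    OutputGuardrailTripwireTriggered,
)


async def run_guarded(service, **kwargs):
    try:
        return await service.run_agent_streamed_auto_send(**kwargs)
    except (InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered) as e:
        # The rejection TaskMessage has already been streamed to the user by the
        # service before re-raising; the caller only decides how to fail.
        guardrail = getattr(getattr(e, "guardrail_result", None), "guardrail", None)
        name = getattr(guardrail, "name", "unknown")
        raise RuntimeError(f"Guardrail '{name}' blocked the run") from e
```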