openai-agents 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This version of openai-agents has been marked as potentially problematic.
Files changed (53)
  1. agents/__init__.py +223 -0
  2. agents/_config.py +23 -0
  3. agents/_debug.py +17 -0
  4. agents/_run_impl.py +792 -0
  5. agents/_utils.py +61 -0
  6. agents/agent.py +159 -0
  7. agents/agent_output.py +144 -0
  8. agents/computer.py +107 -0
  9. agents/exceptions.py +63 -0
  10. agents/extensions/handoff_filters.py +67 -0
  11. agents/extensions/handoff_prompt.py +19 -0
  12. agents/function_schema.py +340 -0
  13. agents/guardrail.py +320 -0
  14. agents/handoffs.py +236 -0
  15. agents/items.py +246 -0
  16. agents/lifecycle.py +105 -0
  17. agents/logger.py +3 -0
  18. agents/model_settings.py +36 -0
  19. agents/models/__init__.py +0 -0
  20. agents/models/_openai_shared.py +34 -0
  21. agents/models/fake_id.py +5 -0
  22. agents/models/interface.py +107 -0
  23. agents/models/openai_chatcompletions.py +952 -0
  24. agents/models/openai_provider.py +65 -0
  25. agents/models/openai_responses.py +384 -0
  26. agents/result.py +220 -0
  27. agents/run.py +904 -0
  28. agents/run_context.py +26 -0
  29. agents/stream_events.py +58 -0
  30. agents/strict_schema.py +167 -0
  31. agents/tool.py +288 -0
  32. agents/tracing/__init__.py +97 -0
  33. agents/tracing/create.py +306 -0
  34. agents/tracing/logger.py +3 -0
  35. agents/tracing/processor_interface.py +69 -0
  36. agents/tracing/processors.py +261 -0
  37. agents/tracing/scope.py +45 -0
  38. agents/tracing/setup.py +211 -0
  39. agents/tracing/span_data.py +188 -0
  40. agents/tracing/spans.py +264 -0
  41. agents/tracing/traces.py +195 -0
  42. agents/tracing/util.py +17 -0
  43. agents/usage.py +22 -0
  44. agents/version.py +7 -0
  45. openai_agents-0.0.3.dist-info/METADATA +204 -0
  46. openai_agents-0.0.3.dist-info/RECORD +49 -0
  47. openai_agents-0.0.3.dist-info/licenses/LICENSE +21 -0
  48. openai-agents/example.py +0 -2
  49. openai_agents-0.0.1.dist-info/METADATA +0 -17
  50. openai_agents-0.0.1.dist-info/RECORD +0 -6
  51. openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
  52. {openai-agents → agents/extensions}/__init__.py +0 -0
  53. {openai_agents-0.0.1.dist-info → openai_agents-0.0.3.dist-info}/WHEEL +0 -0
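Taken together, the added modules turn `agents` into a full Agents SDK runtime: agent definition (agent.py), the run loop (run.py, _run_impl.py), tool and handoff plumbing, guardrails, and tracing, replacing the near-empty 0.0.1 placeholder. As a rough sketch of how the added public API fits together (a minimal example assuming the documented `Agent`/`Runner` entry points and an OPENAI_API_KEY in the environment; the agent name and prompt are illustrative):

    from agents import Agent, Runner

    # Define an agent (agent.py) and drive it through the run loop (run.py / _run_impl.py).
    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
    result = Runner.run_sync(agent, "Write a haiku about recursion in programming.")
    print(result.final_output)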
agents/_run_impl.py ADDED
@@ -0,0 +1,792 @@
+ from __future__ import annotations
+
+ import asyncio
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Any
+
+ from openai.types.responses import (
+     ResponseComputerToolCall,
+     ResponseFileSearchToolCall,
+     ResponseFunctionToolCall,
+     ResponseFunctionWebSearch,
+     ResponseOutputMessage,
+ )
+ from openai.types.responses.response_computer_tool_call import (
+     ActionClick,
+     ActionDoubleClick,
+     ActionDrag,
+     ActionKeypress,
+     ActionMove,
+     ActionScreenshot,
+     ActionScroll,
+     ActionType,
+     ActionWait,
+ )
+ from openai.types.responses.response_input_param import ComputerCallOutput
+ from openai.types.responses.response_reasoning_item import ResponseReasoningItem
+
+ from . import _utils
+ from .agent import Agent
+ from .agent_output import AgentOutputSchema
+ from .computer import AsyncComputer, Computer
+ from .exceptions import AgentsException, ModelBehaviorError, UserError
+ from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult
+ from .handoffs import Handoff, HandoffInputData
+ from .items import (
+     HandoffCallItem,
+     HandoffOutputItem,
+     ItemHelpers,
+     MessageOutputItem,
+     ModelResponse,
+     ReasoningItem,
+     RunItem,
+     ToolCallItem,
+     ToolCallOutputItem,
+     TResponseInputItem,
+ )
+ from .lifecycle import RunHooks
+ from .logger import logger
+ from .models.interface import ModelTracing
+ from .run_context import RunContextWrapper, TContext
+ from .stream_events import RunItemStreamEvent, StreamEvent
+ from .tool import ComputerTool, FunctionTool
+ from .tracing import (
+     SpanError,
+     Trace,
+     function_span,
+     get_current_trace,
+     guardrail_span,
+     handoff_span,
+     trace,
+ )
+
+ if TYPE_CHECKING:
+     from .run import RunConfig
+
+
+ class QueueCompleteSentinel:
+     pass
+
+
+ QUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel()
+
+
+ @dataclass
+ class ToolRunHandoff:
+     handoff: Handoff
+     tool_call: ResponseFunctionToolCall
+
+
+ @dataclass
+ class ToolRunFunction:
+     tool_call: ResponseFunctionToolCall
+     function_tool: FunctionTool
+
+
+ @dataclass
+ class ToolRunComputerAction:
+     tool_call: ResponseComputerToolCall
+     computer_tool: ComputerTool
+
+
+ @dataclass
+ class ProcessedResponse:
+     new_items: list[RunItem]
+     handoffs: list[ToolRunHandoff]
+     functions: list[ToolRunFunction]
+     computer_actions: list[ToolRunComputerAction]
+
+     def has_tools_to_run(self) -> bool:
+         # Handoffs, functions and computer actions need local processing
+         # Hosted tools have already run, so there's nothing to do.
+         return any(
+             [
+                 self.handoffs,
+                 self.functions,
+                 self.computer_actions,
+             ]
+         )
+
+
+ @dataclass
+ class NextStepHandoff:
+     new_agent: Agent[Any]
+
+
+ @dataclass
+ class NextStepFinalOutput:
+     output: Any
+
+
+ @dataclass
+ class NextStepRunAgain:
+     pass
+
+
+ @dataclass
+ class SingleStepResult:
+     original_input: str | list[TResponseInputItem]
+     """The input items i.e. the items before run() was called. May be mutated by handoff input
+     filters."""
+
+     model_response: ModelResponse
+     """The model response for the current step."""
+
+     pre_step_items: list[RunItem]
+     """Items generated before the current step."""
+
+     new_step_items: list[RunItem]
+     """Items generated during this current step."""
+
+     next_step: NextStepHandoff | NextStepFinalOutput | NextStepRunAgain
+     """The next step to take."""
+
+     @property
+     def generated_items(self) -> list[RunItem]:
+         """Items generated during the agent run (i.e. everything generated after
+         `original_input`)."""
+         return self.pre_step_items + self.new_step_items
+
+
+ def get_model_tracing_impl(
+     tracing_disabled: bool, trace_include_sensitive_data: bool
+ ) -> ModelTracing:
+     if tracing_disabled:
+         return ModelTracing.DISABLED
+     elif trace_include_sensitive_data:
+         return ModelTracing.ENABLED
+     else:
+         return ModelTracing.ENABLED_WITHOUT_DATA
+
+
+ class RunImpl:
+     @classmethod
+     async def execute_tools_and_side_effects(
+         cls,
+         *,
+         agent: Agent[TContext],
+         # The original input to the Runner
+         original_input: str | list[TResponseInputItem],
+         # Everything generated by Runner since the original input, but before the current step
+         pre_step_items: list[RunItem],
+         new_response: ModelResponse,
+         processed_response: ProcessedResponse,
+         output_schema: AgentOutputSchema | None,
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         run_config: RunConfig,
+     ) -> SingleStepResult:
+         # Make a copy of the generated items
+         pre_step_items = list(pre_step_items)
+
+         new_step_items: list[RunItem] = []
+         new_step_items.extend(processed_response.new_items)
+
+         # First, let's run the tool calls - function tools and computer actions
+         function_results, computer_results = await asyncio.gather(
+             cls.execute_function_tool_calls(
+                 agent=agent,
+                 tool_runs=processed_response.functions,
+                 hooks=hooks,
+                 context_wrapper=context_wrapper,
+                 config=run_config,
+             ),
+             cls.execute_computer_actions(
+                 agent=agent,
+                 actions=processed_response.computer_actions,
+                 hooks=hooks,
+                 context_wrapper=context_wrapper,
+                 config=run_config,
+             ),
+         )
+         new_step_items.extend(function_results)
+         new_step_items.extend(computer_results)
+
+         # Second, check if there are any handoffs
+         if run_handoffs := processed_response.handoffs:
+             return await cls.execute_handoffs(
+                 agent=agent,
+                 original_input=original_input,
+                 pre_step_items=pre_step_items,
+                 new_step_items=new_step_items,
+                 new_response=new_response,
+                 run_handoffs=run_handoffs,
+                 hooks=hooks,
+                 context_wrapper=context_wrapper,
+                 run_config=run_config,
+             )
+
+         # Now we can check if the model also produced a final output
+         message_items = [item for item in new_step_items if isinstance(item, MessageOutputItem)]
+
+         # We'll use the last content output as the final output
+         potential_final_output_text = (
+             ItemHelpers.extract_last_text(message_items[-1].raw_item) if message_items else None
+         )
+
+         # There are two possibilities that lead to a final output:
+         # 1. Structured output schema => always leads to a final output
+         # 2. Plain text output schema => only leads to a final output if there are no tool calls
+         if output_schema and not output_schema.is_plain_text() and potential_final_output_text:
+             final_output = output_schema.validate_json(potential_final_output_text)
+             return await cls.execute_final_output(
+                 agent=agent,
+                 original_input=original_input,
+                 new_response=new_response,
+                 pre_step_items=pre_step_items,
+                 new_step_items=new_step_items,
+                 final_output=final_output,
+                 hooks=hooks,
+                 context_wrapper=context_wrapper,
+             )
+         elif (
+             not output_schema or output_schema.is_plain_text()
+         ) and not processed_response.has_tools_to_run():
+             return await cls.execute_final_output(
+                 agent=agent,
+                 original_input=original_input,
+                 new_response=new_response,
+                 pre_step_items=pre_step_items,
+                 new_step_items=new_step_items,
+                 final_output=potential_final_output_text or "",
+                 hooks=hooks,
+                 context_wrapper=context_wrapper,
+             )
+         else:
+             # If there's no final output, we can just run again
+             return SingleStepResult(
+                 original_input=original_input,
+                 model_response=new_response,
+                 pre_step_items=pre_step_items,
+                 new_step_items=new_step_items,
+                 next_step=NextStepRunAgain(),
+             )
+
+     @classmethod
+     def process_model_response(
+         cls,
+         *,
+         agent: Agent[Any],
+         response: ModelResponse,
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+     ) -> ProcessedResponse:
+         items: list[RunItem] = []
+
+         run_handoffs = []
+         functions = []
+         computer_actions = []
+
+         handoff_map = {handoff.tool_name: handoff for handoff in handoffs}
+         function_map = {tool.name: tool for tool in agent.tools if isinstance(tool, FunctionTool)}
+         computer_tool = next((tool for tool in agent.tools if isinstance(tool, ComputerTool)), None)
+
+         for output in response.output:
+             if isinstance(output, ResponseOutputMessage):
+                 items.append(MessageOutputItem(raw_item=output, agent=agent))
+             elif isinstance(output, ResponseFileSearchToolCall):
+                 items.append(ToolCallItem(raw_item=output, agent=agent))
+             elif isinstance(output, ResponseFunctionWebSearch):
+                 items.append(ToolCallItem(raw_item=output, agent=agent))
+             elif isinstance(output, ResponseReasoningItem):
+                 items.append(ReasoningItem(raw_item=output, agent=agent))
+             elif isinstance(output, ResponseComputerToolCall):
+                 items.append(ToolCallItem(raw_item=output, agent=agent))
+                 if not computer_tool:
+                     _utils.attach_error_to_current_span(
+                         SpanError(
+                             message="Computer tool not found",
+                             data={},
+                         )
+                     )
+                     raise ModelBehaviorError(
+                         "Model produced computer action without a computer tool."
+                     )
+                 computer_actions.append(
+                     ToolRunComputerAction(tool_call=output, computer_tool=computer_tool)
+                 )
+             elif not isinstance(output, ResponseFunctionToolCall):
+                 logger.warning(f"Unexpected output type, ignoring: {type(output)}")
+                 continue
+
+             # At this point we know it's a function tool call
+             if not isinstance(output, ResponseFunctionToolCall):
+                 continue
+
+             # Handoffs
+             if output.name in handoff_map:
+                 items.append(HandoffCallItem(raw_item=output, agent=agent))
+                 handoff = ToolRunHandoff(
+                     tool_call=output,
+                     handoff=handoff_map[output.name],
+                 )
+                 run_handoffs.append(handoff)
+             # Regular function tool call
+             else:
+                 if output.name not in function_map:
+                     _utils.attach_error_to_current_span(
+                         SpanError(
+                             message="Tool not found",
+                             data={"tool_name": output.name},
+                         )
+                     )
+                     raise ModelBehaviorError(f"Tool {output.name} not found in agent {agent.name}")
+                 items.append(ToolCallItem(raw_item=output, agent=agent))
+                 functions.append(
+                     ToolRunFunction(
+                         tool_call=output,
+                         function_tool=function_map[output.name],
+                     )
+                 )
+
+         return ProcessedResponse(
+             new_items=items,
+             handoffs=run_handoffs,
+             functions=functions,
+             computer_actions=computer_actions,
+         )
+
+     @classmethod
+     async def execute_function_tool_calls(
+         cls,
+         *,
+         agent: Agent[TContext],
+         tool_runs: list[ToolRunFunction],
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         config: RunConfig,
+     ) -> list[RunItem]:
+         async def run_single_tool(
+             func_tool: FunctionTool, tool_call: ResponseFunctionToolCall
+         ) -> str:
+             with function_span(func_tool.name) as span_fn:
+                 if config.trace_include_sensitive_data:
+                     span_fn.span_data.input = tool_call.arguments
+                 try:
+                     _, _, result = await asyncio.gather(
+                         hooks.on_tool_start(context_wrapper, agent, func_tool),
+                         (
+                             agent.hooks.on_tool_start(context_wrapper, agent, func_tool)
+                             if agent.hooks
+                             else _utils.noop_coroutine()
+                         ),
+                         func_tool.on_invoke_tool(context_wrapper, tool_call.arguments),
+                     )
+
+                     await asyncio.gather(
+                         hooks.on_tool_end(context_wrapper, agent, func_tool, result),
+                         (
+                             agent.hooks.on_tool_end(context_wrapper, agent, func_tool, result)
+                             if agent.hooks
+                             else _utils.noop_coroutine()
+                         ),
+                     )
+                 except Exception as e:
+                     _utils.attach_error_to_current_span(
+                         SpanError(
+                             message="Error running tool",
+                             data={"tool_name": func_tool.name, "error": str(e)},
+                         )
+                     )
+                     if isinstance(e, AgentsException):
+                         raise e
+                     raise UserError(f"Error running tool {func_tool.name}: {e}") from e
+
+                 if config.trace_include_sensitive_data:
+                     span_fn.span_data.output = result
+                 return result
+
+         tasks = []
+         for tool_run in tool_runs:
+             function_tool = tool_run.function_tool
+             tasks.append(run_single_tool(function_tool, tool_run.tool_call))
+
+         results = await asyncio.gather(*tasks)
+
+         return [
+             ToolCallOutputItem(
+                 output=str(result),
+                 raw_item=ItemHelpers.tool_call_output_item(tool_run.tool_call, str(result)),
+                 agent=agent,
+             )
+             for tool_run, result in zip(tool_runs, results)
+         ]
+
+     @classmethod
+     async def execute_computer_actions(
+         cls,
+         *,
+         agent: Agent[TContext],
+         actions: list[ToolRunComputerAction],
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         config: RunConfig,
+     ) -> list[RunItem]:
+         results: list[RunItem] = []
+         # Need to run these serially, because each action can affect the computer state
+         for action in actions:
+             results.append(
+                 await ComputerAction.execute(
+                     agent=agent,
+                     action=action,
+                     hooks=hooks,
+                     context_wrapper=context_wrapper,
+                     config=config,
+                 )
+             )
+
+         return results
+
+     @classmethod
+     async def execute_handoffs(
+         cls,
+         *,
+         agent: Agent[TContext],
+         original_input: str | list[TResponseInputItem],
+         pre_step_items: list[RunItem],
+         new_step_items: list[RunItem],
+         new_response: ModelResponse,
+         run_handoffs: list[ToolRunHandoff],
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         run_config: RunConfig,
+     ) -> SingleStepResult:
+         # If there is more than one handoff, add tool responses that reject those handoffs
+         if len(run_handoffs) > 1:
+             output_message = "Multiple handoffs detected, ignoring this one."
+             new_step_items.extend(
+                 [
+                     ToolCallOutputItem(
+                         output=output_message,
+                         raw_item=ItemHelpers.tool_call_output_item(
+                             handoff.tool_call, output_message
+                         ),
+                         agent=agent,
+                     )
+                     for handoff in run_handoffs[1:]
+                 ]
+             )
+
+         actual_handoff = run_handoffs[0]
+         with handoff_span(from_agent=agent.name) as span_handoff:
+             handoff = actual_handoff.handoff
+             new_agent: Agent[Any] = await handoff.on_invoke_handoff(
+                 context_wrapper, actual_handoff.tool_call.arguments
+             )
+             span_handoff.span_data.to_agent = new_agent.name
+
+             # Append a tool output item for the handoff
+             new_step_items.append(
+                 HandoffOutputItem(
+                     agent=agent,
+                     raw_item=ItemHelpers.tool_call_output_item(
+                         actual_handoff.tool_call,
+                         handoff.get_transfer_message(new_agent),
+                     ),
+                     source_agent=agent,
+                     target_agent=new_agent,
+                 )
+             )
+
+             # Execute handoff hooks
+             await asyncio.gather(
+                 hooks.on_handoff(
+                     context=context_wrapper,
+                     from_agent=agent,
+                     to_agent=new_agent,
+                 ),
+                 (
+                     agent.hooks.on_handoff(
+                         context_wrapper,
+                         agent=new_agent,
+                         source=agent,
+                     )
+                     if agent.hooks
+                     else _utils.noop_coroutine()
+                 ),
+             )
+
+             # If there's an input filter, filter the input for the next agent
+             input_filter = handoff.input_filter or (
+                 run_config.handoff_input_filter if run_config else None
+             )
+             if input_filter:
+                 logger.debug("Filtering inputs for handoff")
+                 handoff_input_data = HandoffInputData(
+                     input_history=tuple(original_input)
+                     if isinstance(original_input, list)
+                     else original_input,
+                     pre_handoff_items=tuple(pre_step_items),
+                     new_items=tuple(new_step_items),
+                 )
+                 if not callable(input_filter):
+                     _utils.attach_error_to_span(
+                         span_handoff,
+                         SpanError(
+                             message="Invalid input filter",
+                             data={"details": "not callable()"},
+                         ),
+                     )
+                     raise UserError(f"Invalid input filter: {input_filter}")
+                 filtered = input_filter(handoff_input_data)
+                 if not isinstance(filtered, HandoffInputData):
+                     _utils.attach_error_to_span(
+                         span_handoff,
+                         SpanError(
+                             message="Invalid input filter result",
+                             data={"details": "not a HandoffInputData"},
+                         ),
+                     )
+                     raise UserError(f"Invalid input filter result: {filtered}")
+
+                 original_input = (
+                     filtered.input_history
+                     if isinstance(filtered.input_history, str)
+                     else list(filtered.input_history)
+                 )
+                 pre_step_items = list(filtered.pre_handoff_items)
+                 new_step_items = list(filtered.new_items)
+
+         return SingleStepResult(
+             original_input=original_input,
+             model_response=new_response,
+             pre_step_items=pre_step_items,
+             new_step_items=new_step_items,
+             next_step=NextStepHandoff(new_agent),
+         )
+
+     @classmethod
+     async def execute_final_output(
+         cls,
+         *,
+         agent: Agent[TContext],
+         original_input: str | list[TResponseInputItem],
+         new_response: ModelResponse,
+         pre_step_items: list[RunItem],
+         new_step_items: list[RunItem],
+         final_output: Any,
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+     ) -> SingleStepResult:
+         # Run the on_end hooks
+         await cls.run_final_output_hooks(agent, hooks, context_wrapper, final_output)
+
+         return SingleStepResult(
+             original_input=original_input,
+             model_response=new_response,
+             pre_step_items=pre_step_items,
+             new_step_items=new_step_items,
+             next_step=NextStepFinalOutput(final_output),
+         )
+
+     @classmethod
+     async def run_final_output_hooks(
+         cls,
+         agent: Agent[TContext],
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         final_output: Any,
+     ):
+         await asyncio.gather(
+             hooks.on_agent_end(context_wrapper, agent, final_output),
+             agent.hooks.on_end(context_wrapper, agent, final_output)
+             if agent.hooks
+             else _utils.noop_coroutine(),
+         )
+
+     @classmethod
+     async def run_single_input_guardrail(
+         cls,
+         agent: Agent[Any],
+         guardrail: InputGuardrail[TContext],
+         input: str | list[TResponseInputItem],
+         context: RunContextWrapper[TContext],
+     ) -> InputGuardrailResult:
+         with guardrail_span(guardrail.get_name()) as span_guardrail:
+             result = await guardrail.run(agent, input, context)
+             span_guardrail.span_data.triggered = result.output.tripwire_triggered
+             return result
+
+     @classmethod
+     async def run_single_output_guardrail(
+         cls,
+         guardrail: OutputGuardrail[TContext],
+         agent: Agent[Any],
+         agent_output: Any,
+         context: RunContextWrapper[TContext],
+     ) -> OutputGuardrailResult:
+         with guardrail_span(guardrail.get_name()) as span_guardrail:
+             result = await guardrail.run(agent=agent, agent_output=agent_output, context=context)
+             span_guardrail.span_data.triggered = result.output.tripwire_triggered
+             return result
+
+     @classmethod
+     def stream_step_result_to_queue(
+         cls,
+         step_result: SingleStepResult,
+         queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel],
+     ):
+         for item in step_result.new_step_items:
+             if isinstance(item, MessageOutputItem):
+                 event = RunItemStreamEvent(item=item, name="message_output_created")
+             elif isinstance(item, HandoffCallItem):
+                 event = RunItemStreamEvent(item=item, name="handoff_requested")
+             elif isinstance(item, HandoffOutputItem):
+                 event = RunItemStreamEvent(item=item, name="handoff_occured")
+             elif isinstance(item, ToolCallItem):
+                 event = RunItemStreamEvent(item=item, name="tool_called")
+             elif isinstance(item, ToolCallOutputItem):
+                 event = RunItemStreamEvent(item=item, name="tool_output")
+             elif isinstance(item, ReasoningItem):
+                 event = RunItemStreamEvent(item=item, name="reasoning_item_created")
+             else:
+                 logger.warning(f"Unexpected item type: {type(item)}")
+                 event = None
+
+             if event:
+                 queue.put_nowait(event)
+
+
+ class TraceCtxManager:
+     """Creates a trace only if there is no current trace, and manages the trace lifecycle."""
+
+     def __init__(
+         self,
+         workflow_name: str,
+         trace_id: str | None,
+         group_id: str | None,
+         metadata: dict[str, Any] | None,
+         disabled: bool,
+     ):
+         self.trace: Trace | None = None
+         self.workflow_name = workflow_name
+         self.trace_id = trace_id
+         self.group_id = group_id
+         self.metadata = metadata
+         self.disabled = disabled
+
+     def __enter__(self) -> TraceCtxManager:
+         current_trace = get_current_trace()
+         if not current_trace:
+             self.trace = trace(
+                 workflow_name=self.workflow_name,
+                 trace_id=self.trace_id,
+                 group_id=self.group_id,
+                 metadata=self.metadata,
+                 disabled=self.disabled,
+             )
+             self.trace.start(mark_as_current=True)
+
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.trace:
+             self.trace.finish(reset_current=True)
+
+
+ class ComputerAction:
+     @classmethod
+     async def execute(
+         cls,
+         *,
+         agent: Agent[TContext],
+         action: ToolRunComputerAction,
+         hooks: RunHooks[TContext],
+         context_wrapper: RunContextWrapper[TContext],
+         config: RunConfig,
+     ) -> RunItem:
+         output_func = (
+             cls._get_screenshot_async(action.computer_tool.computer, action.tool_call)
+             if isinstance(action.computer_tool.computer, AsyncComputer)
+             else cls._get_screenshot_sync(action.computer_tool.computer, action.tool_call)
+         )
+
+         _, _, output = await asyncio.gather(
+             hooks.on_tool_start(context_wrapper, agent, action.computer_tool),
+             (
+                 agent.hooks.on_tool_start(context_wrapper, agent, action.computer_tool)
+                 if agent.hooks
+                 else _utils.noop_coroutine()
+             ),
+             output_func,
+         )
+
+         await asyncio.gather(
+             hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output),
+             (
+                 agent.hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output)
+                 if agent.hooks
+                 else _utils.noop_coroutine()
+             ),
+         )
+
+         # TODO: don't send a screenshot every single time, use references
+         image_url = f"data:image/png;base64,{output}"
+         return ToolCallOutputItem(
+             agent=agent,
+             output=image_url,
+             raw_item=ComputerCallOutput(
+                 call_id=action.tool_call.call_id,
+                 output={
+                     "type": "computer_screenshot",
+                     "image_url": image_url,
+                 },
+                 type="computer_call_output",
+             ),
+         )
+
+     @classmethod
+     async def _get_screenshot_sync(
+         cls,
+         computer: Computer,
+         tool_call: ResponseComputerToolCall,
+     ) -> str:
+         action = tool_call.action
+         if isinstance(action, ActionClick):
+             computer.click(action.x, action.y, action.button)
+         elif isinstance(action, ActionDoubleClick):
+             computer.double_click(action.x, action.y)
+         elif isinstance(action, ActionDrag):
+             computer.drag([(p.x, p.y) for p in action.path])
+         elif isinstance(action, ActionKeypress):
+             computer.keypress(action.keys)
+         elif isinstance(action, ActionMove):
+             computer.move(action.x, action.y)
+         elif isinstance(action, ActionScreenshot):
+             computer.screenshot()
+         elif isinstance(action, ActionScroll):
+             computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y)
+         elif isinstance(action, ActionType):
+             computer.type(action.text)
+         elif isinstance(action, ActionWait):
+             computer.wait()
+
+         return computer.screenshot()
+
+     @classmethod
+     async def _get_screenshot_async(
+         cls,
+         computer: AsyncComputer,
+         tool_call: ResponseComputerToolCall,
+     ) -> str:
+         action = tool_call.action
+         if isinstance(action, ActionClick):
+             await computer.click(action.x, action.y, action.button)
+         elif isinstance(action, ActionDoubleClick):
+             await computer.double_click(action.x, action.y)
+         elif isinstance(action, ActionDrag):
+             await computer.drag([(p.x, p.y) for p in action.path])
+         elif isinstance(action, ActionKeypress):
+             await computer.keypress(action.keys)
+         elif isinstance(action, ActionMove):
+             await computer.move(action.x, action.y)
+         elif isinstance(action, ActionScreenshot):
+             await computer.screenshot()
+         elif isinstance(action, ActionScroll):
+             await computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y)
+         elif isinstance(action, ActionType):
+             await computer.type(action.text)
+         elif isinstance(action, ActionWait):
+             await computer.wait()
+
+         return await computer.screenshot()
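
The tracing-mode helper near the top of this file reduces to a simple precedence rule: a disabled flag wins outright, otherwise the sensitive-data flag decides between full and redacted spans. A quick sketch of that mapping (assuming the module is importable exactly as packaged above):

    from agents._run_impl import get_model_tracing_impl
    from agents.models.interface import ModelTracing

    # tracing_disabled takes precedence over trace_include_sensitive_data
    assert get_model_tracing_impl(True, True) == ModelTracing.DISABLED
    assert get_model_tracing_impl(False, True) == ModelTracing.ENABLED
    assert get_model_tracing_impl(False, False) == ModelTracing.ENABLED_WITHOUT_DATA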