openai-agents 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of openai-agents might be problematic; see the registry page for more details.

agents/run.py CHANGED
@@ -8,6 +8,7 @@ from typing import Any, cast
 from openai.types.responses import ResponseCompletedEvent
 
 from ._run_impl import (
+    AgentToolUseTracker,
     NextStepFinalOutput,
     NextStepHandoff,
     NextStepRunAgain,
@@ -37,6 +38,7 @@ from .models.openai_provider import OpenAIProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
 from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
+from .tool import Tool
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
@@ -149,6 +151,8 @@ class Runner:
         if run_config is None:
             run_config = RunConfig()
 
+        tool_use_tracker = AgentToolUseTracker()
+
         with TraceCtxManager(
             workflow_name=run_config.workflow_name,
             trace_id=run_config.trace_id,
@@ -177,7 +181,6 @@
                 # agent changes, or if the agent loop ends.
                 if current_span is None:
                     handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
-                    tool_names = [t.name for t in current_agent.tools]
                     if output_schema := cls._get_output_schema(current_agent):
                         output_type_name = output_schema.output_type_name()
                     else:
@@ -186,11 +189,13 @@
                     current_span = agent_span(
                         name=current_agent.name,
                         handoffs=handoff_names,
-                        tools=tool_names,
                         output_type=output_type_name,
                     )
                     current_span.start(mark_as_current=True)
 
+                all_tools = await cls._get_all_tools(current_agent)
+                current_span.span_data.tools = [t.name for t in all_tools]
+
                 current_turn += 1
                 if current_turn > max_turns:
                     _error_tracing.attach_error_to_span(
@@ -217,23 +222,27 @@
                         ),
                         cls._run_single_turn(
                             agent=current_agent,
+                            all_tools=all_tools,
                             original_input=original_input,
                             generated_items=generated_items,
                             hooks=hooks,
                             context_wrapper=context_wrapper,
                             run_config=run_config,
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
+                            tool_use_tracker=tool_use_tracker,
                         ),
                     )
                 else:
                     turn_result = await cls._run_single_turn(
                         agent=current_agent,
+                        all_tools=all_tools,
                         original_input=original_input,
                         generated_items=generated_items,
                         hooks=hooks,
                         context_wrapper=context_wrapper,
                         run_config=run_config,
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
+                        tool_use_tracker=tool_use_tracker,
                     )
                 should_run_agent_start_hooks = False
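
The hunks above thread a new `tool_use_tracker` through every turn of the run loop. Its implementation lives in `_run_impl` and is not part of this diff; as a rough, hypothetical sketch of the shape implied by the call sites (only `add_tool_use` is visible here, and the real tracker receives `Agent` objects rather than names; the query method below is invented for illustration):

    # Hypothetical sketch of AgentToolUseTracker, inferred from its call sites.
    # Keyed by agent name for simplicity; agent_used_tools() is invented.
    from dataclasses import dataclass, field

    @dataclass
    class AgentToolUseTrackerSketch:
        # agent name -> names of tools that agent has used so far in this run
        _usage: dict[str, list[str]] = field(default_factory=dict)

        def add_tool_use(self, agent_name: str, tool_names: list[str]) -> None:
            self._usage.setdefault(agent_name, []).extend(tool_names)

        def agent_used_tools(self, agent_name: str) -> bool:
            return bool(self._usage.get(agent_name))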
@@ -481,6 +490,7 @@
         current_agent = starting_agent
         current_turn = 0
         should_run_agent_start_hooks = True
+        tool_use_tracker = AgentToolUseTracker()
 
         streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
 
@@ -493,7 +503,6 @@
                 # agent changes, or if the agent loop ends.
                 if current_span is None:
                     handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
-                    tool_names = [t.name for t in current_agent.tools]
                     if output_schema := cls._get_output_schema(current_agent):
                         output_type_name = output_schema.output_type_name()
                     else:
@@ -502,11 +511,13 @@
                     current_span = agent_span(
                         name=current_agent.name,
                         handoffs=handoff_names,
-                        tools=tool_names,
                         output_type=output_type_name,
                     )
                     current_span.start(mark_as_current=True)
 
+                all_tools = await cls._get_all_tools(current_agent)
+                tool_names = [t.name for t in all_tools]
+                current_span.span_data.tools = tool_names
                 current_turn += 1
                 streamed_result.current_turn = current_turn
 
@@ -541,6 +552,8 @@
                     context_wrapper,
                     run_config,
                     should_run_agent_start_hooks,
+                    tool_use_tracker,
+                    all_tools,
                 )
                 should_run_agent_start_hooks = False
 
@@ -608,6 +621,8 @@
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         should_run_agent_start_hooks: bool,
+        tool_use_tracker: AgentToolUseTracker,
+        all_tools: list[Tool],
     ) -> SingleStepResult:
         if should_run_agent_start_hooks:
             await asyncio.gather(
@@ -627,9 +642,10 @@
         system_prompt = await agent.get_system_prompt(context_wrapper)
 
         handoffs = cls._get_handoffs(agent)
-
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
+        model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
+
         final_response: ModelResponse | None = None
 
         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
@@ -640,7 +656,7 @@
             system_prompt,
             input,
             model_settings,
-            agent.tools,
+            all_tools,
             output_schema,
             handoffs,
             get_model_tracing_impl(
@@ -677,10 +693,12 @@
             pre_step_items=streamed_result.new_items,
             new_response=final_response,
             output_schema=output_schema,
+            all_tools=all_tools,
             handoffs=handoffs,
             hooks=hooks,
             context_wrapper=context_wrapper,
             run_config=run_config,
+            tool_use_tracker=tool_use_tracker,
         )
 
         RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
@@ -691,12 +709,14 @@
         cls,
         *,
         agent: Agent[TContext],
+        all_tools: list[Tool],
         original_input: str | list[TResponseInputItem],
         generated_items: list[RunItem],
         hooks: RunHooks[TContext],
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         should_run_agent_start_hooks: bool,
+        tool_use_tracker: AgentToolUseTracker,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -721,9 +741,11 @@
             system_prompt,
             input,
             output_schema,
+            all_tools,
             handoffs,
             context_wrapper,
             run_config,
+            tool_use_tracker,
         )
 
         return await cls._get_single_step_result_from_response(
@@ -732,10 +754,12 @@
             pre_step_items=generated_items,
             new_response=new_response,
             output_schema=output_schema,
+            all_tools=all_tools,
             handoffs=handoffs,
             hooks=hooks,
             context_wrapper=context_wrapper,
             run_config=run_config,
+            tool_use_tracker=tool_use_tracker,
         )
 
     @classmethod
@@ -743,6 +767,7 @@
         cls,
         *,
         agent: Agent[TContext],
+        all_tools: list[Tool],
         original_input: str | list[TResponseInputItem],
         pre_step_items: list[RunItem],
         new_response: ModelResponse,
@@ -751,13 +776,18 @@
         hooks: RunHooks[TContext],
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
+        tool_use_tracker: AgentToolUseTracker,
     ) -> SingleStepResult:
         processed_response = RunImpl.process_model_response(
             agent=agent,
+            all_tools=all_tools,
             response=new_response,
             output_schema=output_schema,
             handoffs=handoffs,
         )
+
+        tool_use_tracker.add_tool_use(agent, processed_response.tools_used)
+
         return await RunImpl.execute_tools_and_side_effects(
             agent=agent,
             original_input=original_input,
@@ -853,17 +883,21 @@
         system_prompt: str | None,
         input: list[TResponseInputItem],
         output_schema: AgentOutputSchema | None,
+        all_tools: list[Tool],
         handoffs: list[Handoff],
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
+        tool_use_tracker: AgentToolUseTracker,
     ) -> ModelResponse:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
+        model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
+
         new_response = await model.get_response(
             system_instructions=system_prompt,
             input=input,
             model_settings=model_settings,
-            tools=agent.tools,
+            tools=all_tools,
             output_schema=output_schema,
             handoffs=handoffs,
             tracing=get_model_tracing_impl(
@@ -892,6 +926,10 @@
                 handoffs.append(handoff(handoff_item))
         return handoffs
 
+    @classmethod
+    async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]:
+        return await agent.get_all_tools()
+
     @classmethod
     def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model:
         if isinstance(run_config.model, Model):
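
Two behavioral changes run through this file: tools are now resolved once per turn via the async `_get_all_tools` (presumably so tools provided by MCP servers can be fetched alongside the agent's own function tools), and the resolved `model_settings` pass through `RunImpl.maybe_reset_tool_choice` before every model call. That function's body is not part of this diff; a plausible, hedged sketch of what such a reset could do, assuming `ModelSettings` carries a `tool_choice` field:

    # Hedged sketch only: the real logic lives in RunImpl.maybe_reset_tool_choice,
    # which is not shown in this diff. Assumes a tool_choice field on settings.
    from dataclasses import dataclass, replace

    @dataclass(frozen=True)
    class ModelSettingsSketch:
        tool_choice: str | None = None  # None, "auto", "required", or a tool name

    def maybe_reset_tool_choice_sketch(
        agent_already_used_tools: bool,
        settings: ModelSettingsSketch,
    ) -> ModelSettingsSketch:
        # If a forced tool_choice already triggered a tool call, fall back to
        # "auto" so the next turn can end with a normal answer instead of
        # looping on forced tool calls.
        if agent_already_used_tools and settings.tool_choice not in (None, "auto"):
            return replace(settings, tool_choice="auto")
        return settings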
agents/tracing/__init__.py CHANGED
@@ -9,8 +9,12 @@ from .create import (
     get_current_trace,
     guardrail_span,
     handoff_span,
+    mcp_tools_span,
     response_span,
+    speech_group_span,
+    speech_span,
     trace,
+    transcription_span,
 )
 from .processor_interface import TracingProcessor
 from .processors import default_exporter, default_processor
@@ -22,8 +26,12 @@ from .span_data import (
     GenerationSpanData,
     GuardrailSpanData,
     HandoffSpanData,
+    MCPListToolsSpanData,
     ResponseSpanData,
     SpanData,
+    SpeechGroupSpanData,
+    SpeechSpanData,
+    TranscriptionSpanData,
 )
 from .spans import Span, SpanError
 from .traces import Trace
@@ -53,10 +61,18 @@ __all__ = [
     "GenerationSpanData",
     "GuardrailSpanData",
     "HandoffSpanData",
+    "MCPListToolsSpanData",
     "ResponseSpanData",
+    "SpeechGroupSpanData",
+    "SpeechSpanData",
+    "TranscriptionSpanData",
     "TracingProcessor",
     "gen_trace_id",
     "gen_span_id",
+    "speech_group_span",
+    "speech_span",
+    "transcription_span",
+    "mcp_tools_span",
 ]
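
With these re-exports, the new span helpers are importable straight from `agents.tracing`. A brief usage sketch, grounded in the signatures and docstrings added below (model name and payloads are illustrative placeholders):

    # Usage sketch for the newly exported helpers; values are placeholders.
    from agents.tracing import mcp_tools_span, transcription_span

    with transcription_span(model="whisper-1", input="<base64 audio>") as span:
        # span_data fields mirror the constructor arguments
        span.span_data.output = "transcribed text"

    with mcp_tools_span(server="filesystem") as span:
        span.span_data.result = ["read_file", "write_file"]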
agents/tracing/create.py CHANGED
@@ -12,7 +12,11 @@ from .span_data import (
     GenerationSpanData,
     GuardrailSpanData,
     HandoffSpanData,
+    MCPListToolsSpanData,
     ResponseSpanData,
+    SpeechGroupSpanData,
+    SpeechSpanData,
+    TranscriptionSpanData,
 )
 from .spans import Span
 from .traces import Trace
@@ -181,7 +185,11 @@ def generation_span(
     """
     return GLOBAL_TRACE_PROVIDER.create_span(
         span_data=GenerationSpanData(
-            input=input, output=output, model=model, model_config=model_config, usage=usage
+            input=input,
+            output=output,
+            model=model,
+            model_config=model_config,
+            usage=usage,
         ),
         span_id=span_id,
         parent=parent,
@@ -304,3 +312,144 @@ def guardrail_span(
         parent=parent,
         disabled=disabled,
     )
+
+
+def transcription_span(
+    model: str | None = None,
+    input: str | None = None,
+    input_format: str | None = "pcm",
+    output: str | None = None,
+    model_config: Mapping[str, Any] | None = None,
+    span_id: str | None = None,
+    parent: Trace | Span[Any] | None = None,
+    disabled: bool = False,
+) -> Span[TranscriptionSpanData]:
+    """Create a new transcription span. The span will not be started automatically, you should
+    either do `with transcription_span() ...` or call `span.start()` + `span.finish()` manually.
+
+    Args:
+        model: The name of the model used for the speech-to-text.
+        input: The audio input of the speech-to-text transcription, as a base64 encoded string of
+            audio bytes.
+        input_format: The format of the audio input (defaults to "pcm").
+        output: The output of the speech-to-text transcription.
+        model_config: The model configuration (hyperparameters) used.
+        span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
+            recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
+            correctly formatted.
+        parent: The parent span or trace. If not provided, we will automatically use the current
+            trace/span as the parent.
+        disabled: If True, we will return a Span but the Span will not be recorded.
+
+    Returns:
+        The newly created speech-to-text span.
+    """
+    return GLOBAL_TRACE_PROVIDER.create_span(
+        span_data=TranscriptionSpanData(
+            input=input,
+            input_format=input_format,
+            output=output,
+            model=model,
+            model_config=model_config,
+        ),
+        span_id=span_id,
+        parent=parent,
+        disabled=disabled,
+    )
+
+
+def speech_span(
+    model: str | None = None,
+    input: str | None = None,
+    output: str | None = None,
+    output_format: str | None = "pcm",
+    model_config: Mapping[str, Any] | None = None,
+    first_content_at: str | None = None,
+    span_id: str | None = None,
+    parent: Trace | Span[Any] | None = None,
+    disabled: bool = False,
+) -> Span[SpeechSpanData]:
+    """Create a new speech span. The span will not be started automatically, you should either do
+    `with speech_span() ...` or call `span.start()` + `span.finish()` manually.
+
+    Args:
+        model: The name of the model used for the text-to-speech.
+        input: The text input of the text-to-speech.
+        output: The audio output of the text-to-speech as base64 encoded string of PCM audio bytes.
+        output_format: The format of the audio output (defaults to "pcm").
+        model_config: The model configuration (hyperparameters) used.
+        first_content_at: The time of the first byte of the audio output.
+        span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
+            recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
+            correctly formatted.
+        parent: The parent span or trace. If not provided, we will automatically use the current
+            trace/span as the parent.
+        disabled: If True, we will return a Span but the Span will not be recorded.
+    """
+    return GLOBAL_TRACE_PROVIDER.create_span(
+        span_data=SpeechSpanData(
+            model=model,
+            input=input,
+            output=output,
+            output_format=output_format,
+            model_config=model_config,
+            first_content_at=first_content_at,
+        ),
+        span_id=span_id,
+        parent=parent,
+        disabled=disabled,
+    )
+
+
+def speech_group_span(
+    input: str | None = None,
+    span_id: str | None = None,
+    parent: Trace | Span[Any] | None = None,
+    disabled: bool = False,
+) -> Span[SpeechGroupSpanData]:
+    """Create a new speech group span. The span will not be started automatically, you should
+    either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually.
+
+    Args:
+        input: The input text used for the speech request.
+        span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
+            recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
+            correctly formatted.
+        parent: The parent span or trace. If not provided, we will automatically use the current
+            trace/span as the parent.
+        disabled: If True, we will return a Span but the Span will not be recorded.
+    """
+    return GLOBAL_TRACE_PROVIDER.create_span(
+        span_data=SpeechGroupSpanData(input=input),
+        span_id=span_id,
+        parent=parent,
+        disabled=disabled,
+    )
+
+
+def mcp_tools_span(
+    server: str | None = None,
+    result: list[str] | None = None,
+    span_id: str | None = None,
+    parent: Trace | Span[Any] | None = None,
+    disabled: bool = False,
+) -> Span[MCPListToolsSpanData]:
+    """Create a new MCP list tools span. The span will not be started automatically, you should
+    either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually.
+
+    Args:
+        server: The name of the MCP server.
+        result: The result of the MCP list tools call.
+        span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
+            recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
+            correctly formatted.
+        parent: The parent span or trace. If not provided, we will automatically use the current
+            trace/span as the parent.
+        disabled: If True, we will return a Span but the Span will not be recorded.
+    """
+    return GLOBAL_TRACE_PROVIDER.create_span(
+        span_data=MCPListToolsSpanData(server=server, result=result),
+        span_id=span_id,
+        parent=parent,
+        disabled=disabled,
+    )
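
As each docstring notes, these helpers do not start the span; outside a `with` block the lifecycle is driven manually, roughly as follows (model name and output payload are illustrative):

    # Manual start/finish pattern described in the docstrings above.
    from agents.tracing import gen_span_id, speech_span

    span = speech_span(model="tts-1", input="Hello!", span_id=gen_span_id())
    span.start()
    try:
        span.span_data.output = "<base64 pcm audio>"
    finally:
        span.finish()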
agents/tracing/processors.py CHANGED
@@ -5,6 +5,7 @@ import queue
 import random
 import threading
 import time
+from functools import cached_property
 from typing import Any
 
 import httpx
@@ -50,9 +51,9 @@ class BackendSpanExporter(TracingExporter):
             base_delay: Base delay (in seconds) for the first backoff.
             max_delay: Maximum delay (in seconds) for backoff growth.
         """
-        self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
-        self.organization = organization or os.environ.get("OPENAI_ORG_ID")
-        self.project = project or os.environ.get("OPENAI_PROJECT_ID")
+        self._api_key = api_key
+        self._organization = organization
+        self._project = project
         self.endpoint = endpoint
         self.max_retries = max_retries
         self.base_delay = base_delay
@@ -68,8 +69,22 @@ class BackendSpanExporter(TracingExporter):
             api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python
                 client.
         """
+        # We're specifically setting the underlying cached property as well
+        self._api_key = api_key
         self.api_key = api_key
 
+    @cached_property
+    def api_key(self):
+        return self._api_key or os.environ.get("OPENAI_API_KEY")
+
+    @cached_property
+    def organization(self):
+        return self._organization or os.environ.get("OPENAI_ORG_ID")
+
+    @cached_property
+    def project(self):
+        return self._project or os.environ.get("OPENAI_PROJECT_ID")
+
     def export(self, items: list[Trace | Span[Any]]) -> None:
         if not items:
             return
@@ -102,18 +117,22 @@ class BackendSpanExporter(TracingExporter):
 
             # If the response is a client error (4xx), we wont retry
             if 400 <= response.status_code < 500:
-                logger.error(f"Tracing client error {response.status_code}: {response.text}")
+                logger.error(
+                    f"[non-fatal] Tracing client error {response.status_code}: {response.text}"
+                )
                 return
 
             # For 5xx or other unexpected codes, treat it as transient and retry
-            logger.warning(f"Server error {response.status_code}, retrying.")
+            logger.warning(
+                f"[non-fatal] Tracing: server error {response.status_code}, retrying."
+            )
         except httpx.RequestError as exc:
             # Network or other I/O error, we'll retry
-            logger.warning(f"Request failed: {exc}")
+            logger.warning(f"[non-fatal] Tracing: request failed: {exc}")
 
         # If we reach here, we need to retry or give up
         if attempt >= self.max_retries:
-            logger.error("Max retries reached, giving up on this batch.")
+            logger.error("[non-fatal] Tracing: max retries reached, giving up on this batch.")
            return
 
         # Exponential backoff + jitter
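
The credential change swaps eager `os.environ` reads in `__init__` for `functools.cached_property`, so the environment is consulted on first access rather than at construction time (useful when `OPENAI_API_KEY` is set after the exporter is created). Because `cached_property` is a non-data descriptor, a plain attribute assignment, as in `set_api_key` above, writes directly into the instance `__dict__` and overrides any cached value. A standalone illustration of the pattern (class name is ours):

    # Standalone illustration of the lazy-credential pattern used above.
    import os
    from functools import cached_property

    class LazyConfig:
        def __init__(self, api_key: str | None = None):
            self._api_key = api_key  # no environment read at construction time

        @cached_property
        def api_key(self) -> str | None:
            # Evaluated on first access, then cached in the instance __dict__.
            return self._api_key or os.environ.get("OPENAI_API_KEY")

        def set_api_key(self, api_key: str) -> None:
            self._api_key = api_key
            # Assigning through the attribute overwrites the cached_property's
            # cached value, so later reads see the new key immediately.
            self.api_key = api_key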
agents/tracing/span_data.py CHANGED
@@ -49,12 +49,19 @@ class AgentSpanData(SpanData):
 
 
 class FunctionSpanData(SpanData):
-    __slots__ = ("name", "input", "output")
+    __slots__ = ("name", "input", "output", "mcp_data")
 
-    def __init__(self, name: str, input: str | None, output: Any | None):
+    def __init__(
+        self,
+        name: str,
+        input: str | None,
+        output: Any | None,
+        mcp_data: dict[str, Any] | None = None,
+    ):
         self.name = name
         self.input = input
         self.output = output
+        self.mcp_data = mcp_data
 
     @property
     def type(self) -> str:
@@ -66,6 +73,7 @@ class FunctionSpanData(SpanData):
             "name": self.name,
             "input": self.input,
             "output": str(self.output) if self.output else None,
+            "mcp_data": self.mcp_data,
         }
 
 
@@ -186,3 +194,121 @@ class GuardrailSpanData(SpanData):
             "name": self.name,
             "triggered": self.triggered,
         }
+
+
+class TranscriptionSpanData(SpanData):
+    __slots__ = (
+        "input",
+        "output",
+        "model",
+        "model_config",
+    )
+
+    def __init__(
+        self,
+        input: str | None = None,
+        input_format: str | None = "pcm",
+        output: str | None = None,
+        model: str | None = None,
+        model_config: Mapping[str, Any] | None = None,
+    ):
+        self.input = input
+        self.input_format = input_format
+        self.output = output
+        self.model = model
+        self.model_config = model_config
+
+    @property
+    def type(self) -> str:
+        return "transcription"
+
+    def export(self) -> dict[str, Any]:
+        return {
+            "type": self.type,
+            "input": {
+                "data": self.input or "",
+                "format": self.input_format,
+            },
+            "output": self.output,
+            "model": self.model,
+            "model_config": self.model_config,
+        }
+
+
+class SpeechSpanData(SpanData):
+    __slots__ = ("input", "output", "model", "model_config", "first_byte_at")
+
+    def __init__(
+        self,
+        input: str | None = None,
+        output: str | None = None,
+        output_format: str | None = "pcm",
+        model: str | None = None,
+        model_config: Mapping[str, Any] | None = None,
+        first_content_at: str | None = None,
+    ):
+        self.input = input
+        self.output = output
+        self.output_format = output_format
+        self.model = model
+        self.model_config = model_config
+        self.first_content_at = first_content_at
+
+    @property
+    def type(self) -> str:
+        return "speech"
+
+    def export(self) -> dict[str, Any]:
+        return {
+            "type": self.type,
+            "input": self.input,
+            "output": {
+                "data": self.output or "",
+                "format": self.output_format,
+            },
+            "model": self.model,
+            "model_config": self.model_config,
+            "first_content_at": self.first_content_at,
+        }
+
+
+class SpeechGroupSpanData(SpanData):
+    __slots__ = "input"
+
+    def __init__(
+        self,
+        input: str | None = None,
+    ):
+        self.input = input
+
+    @property
+    def type(self) -> str:
+        return "speech-group"
+
+    def export(self) -> dict[str, Any]:
+        return {
+            "type": self.type,
+            "input": self.input,
+        }
+
+
+class MCPListToolsSpanData(SpanData):
+    __slots__ = (
+        "server",
+        "result",
+    )
+
+    def __init__(self, server: str | None = None, result: list[str] | None = None):
+        self.server = server
+        self.result = result
+
+    @property
+    def type(self) -> str:
+        return "mcp_tools"
+
+    def export(self) -> dict[str, Any]:
+        return {
+            "type": self.type,
+            "server": self.server,
+            "result": self.result,
+        }
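
The `export()` shape of the new span data classes follows directly from the code above; for instance (values illustrative):

    # What TranscriptionSpanData serializes to; values are illustrative.
    from agents.tracing import TranscriptionSpanData

    data = TranscriptionSpanData(input="<base64 audio>", output="hello", model="whisper-1")
    print(data.export())
    # {'type': 'transcription',
    #  'input': {'data': '<base64 audio>', 'format': 'pcm'},
    #  'output': 'hello', 'model': 'whisper-1', 'model_config': None}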
agents/tracing/util.py CHANGED
@@ -15,3 +15,8 @@ def gen_trace_id() -> str:
 def gen_span_id() -> str:
     """Generates a new span ID."""
     return f"span_{uuid.uuid4().hex[:24]}"
+
+
+def gen_group_id() -> str:
+    """Generates a new group ID."""
+    return f"group_{uuid.uuid4().hex[:24]}"