openai-agents 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic. See the registry's advisory page for more details.

Files changed (50)
  1. agents/__init__.py +22 -5
  2. agents/_run_impl.py +101 -22
  3. agents/agent.py +55 -7
  4. agents/agent_output.py +4 -4
  5. agents/function_schema.py +4 -0
  6. agents/guardrail.py +1 -1
  7. agents/handoffs.py +4 -4
  8. agents/items.py +4 -2
  9. agents/models/openai_chatcompletions.py +6 -1
  10. agents/models/openai_provider.py +13 -0
  11. agents/result.py +7 -0
  12. agents/run.py +10 -10
  13. agents/tool.py +34 -10
  14. agents/tracing/__init__.py +12 -0
  15. agents/tracing/create.py +122 -2
  16. agents/tracing/processors.py +2 -2
  17. agents/tracing/scope.py +1 -1
  18. agents/tracing/setup.py +1 -1
  19. agents/tracing/span_data.py +98 -2
  20. agents/tracing/spans.py +1 -1
  21. agents/tracing/traces.py +1 -1
  22. agents/tracing/util.py +5 -0
  23. agents/util/__init__.py +0 -0
  24. agents/util/_coro.py +2 -0
  25. agents/util/_error_tracing.py +16 -0
  26. agents/util/_json.py +31 -0
  27. agents/util/_pretty_print.py +56 -0
  28. agents/util/_transforms.py +11 -0
  29. agents/util/_types.py +7 -0
  30. agents/voice/__init__.py +51 -0
  31. agents/voice/events.py +47 -0
  32. agents/voice/exceptions.py +8 -0
  33. agents/voice/imports.py +11 -0
  34. agents/voice/input.py +88 -0
  35. agents/voice/model.py +193 -0
  36. agents/voice/models/__init__.py +0 -0
  37. agents/voice/models/openai_model_provider.py +97 -0
  38. agents/voice/models/openai_stt.py +457 -0
  39. agents/voice/models/openai_tts.py +54 -0
  40. agents/voice/pipeline.py +151 -0
  41. agents/voice/pipeline_config.py +46 -0
  42. agents/voice/result.py +287 -0
  43. agents/voice/utils.py +37 -0
  44. agents/voice/workflow.py +93 -0
  45. {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/METADATA +9 -4
  46. openai_agents-0.0.6.dist-info/RECORD +70 -0
  47. agents/_utils.py +0 -61
  48. openai_agents-0.0.4.dist-info/RECORD +0 -49
  49. {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/WHEEL +0 -0
  50. {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/licenses/LICENSE +0 -0
agents/run.py CHANGED
@@ -7,7 +7,6 @@ from typing import Any, cast
7
7
 
8
8
  from openai.types.responses import ResponseCompletedEvent
9
9
 
10
- from . import Model, _utils
11
10
  from ._run_impl import (
12
11
  NextStepFinalOutput,
13
12
  NextStepHandoff,
@@ -33,7 +32,7 @@ from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
33
32
  from .lifecycle import RunHooks
34
33
  from .logger import logger
35
34
  from .model_settings import ModelSettings
36
- from .models.interface import ModelProvider
35
+ from .models.interface import Model, ModelProvider
37
36
  from .models.openai_provider import OpenAIProvider
38
37
  from .result import RunResult, RunResultStreaming
39
38
  from .run_context import RunContextWrapper, TContext
@@ -41,6 +40,7 @@ from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
41
40
  from .tracing import Span, SpanError, agent_span, get_current_trace, trace
42
41
  from .tracing.span_data import AgentSpanData
43
42
  from .usage import Usage
43
+ from .util import _coro, _error_tracing
44
44
 
45
45
  DEFAULT_MAX_TURNS = 10
46
46
 
@@ -193,7 +193,7 @@ class Runner:
193
193
 
194
194
  current_turn += 1
195
195
  if current_turn > max_turns:
196
- _utils.attach_error_to_span(
196
+ _error_tracing.attach_error_to_span(
197
197
  current_span,
198
198
  SpanError(
199
199
  message="Max turns exceeded",
@@ -447,7 +447,7 @@ class Runner:
447
447
  for done in asyncio.as_completed(guardrail_tasks):
448
448
  result = await done
449
449
  if result.output.tripwire_triggered:
450
- _utils.attach_error_to_span(
450
+ _error_tracing.attach_error_to_span(
451
451
  parent_span,
452
452
  SpanError(
453
453
  message="Guardrail tripwire triggered",
@@ -511,7 +511,7 @@ class Runner:
511
511
  streamed_result.current_turn = current_turn
512
512
 
513
513
  if current_turn > max_turns:
514
- _utils.attach_error_to_span(
514
+ _error_tracing.attach_error_to_span(
515
515
  current_span,
516
516
  SpanError(
517
517
  message="Max turns exceeded",
@@ -583,7 +583,7 @@ class Runner:
583
583
  pass
584
584
  except Exception as e:
585
585
  if current_span:
586
- _utils.attach_error_to_span(
586
+ _error_tracing.attach_error_to_span(
587
587
  current_span,
588
588
  SpanError(
589
589
  message="Error in agent run",
@@ -615,7 +615,7 @@ class Runner:
615
615
  (
616
616
  agent.hooks.on_start(context_wrapper, agent)
617
617
  if agent.hooks
618
- else _utils.noop_coroutine()
618
+ else _coro.noop_coroutine()
619
619
  ),
620
620
  )
621
621
 
@@ -705,7 +705,7 @@ class Runner:
705
705
  (
706
706
  agent.hooks.on_start(context_wrapper, agent)
707
707
  if agent.hooks
708
- else _utils.noop_coroutine()
708
+ else _coro.noop_coroutine()
709
709
  ),
710
710
  )
711
711
 
@@ -796,7 +796,7 @@ class Runner:
796
796
  # Cancel all guardrail tasks if a tripwire is triggered.
797
797
  for t in guardrail_tasks:
798
798
  t.cancel()
799
- _utils.attach_error_to_current_span(
799
+ _error_tracing.attach_error_to_current_span(
800
800
  SpanError(
801
801
  message="Guardrail tripwire triggered",
802
802
  data={"guardrail": result.guardrail.get_name()},
@@ -834,7 +834,7 @@ class Runner:
834
834
  # Cancel all guardrail tasks if a tripwire is triggered.
835
835
  for t in guardrail_tasks:
836
836
  t.cancel()
837
- _utils.attach_error_to_current_span(
837
+ _error_tracing.attach_error_to_current_span(
838
838
  SpanError(
839
839
  message="Guardrail tripwire triggered",
840
840
  data={"guardrail": result.guardrail.get_name()},
agents/tool.py CHANGED
@@ -11,14 +11,16 @@ from openai.types.responses.web_search_tool_param import UserLocation
11
11
  from pydantic import ValidationError
12
12
  from typing_extensions import Concatenate, ParamSpec
13
13
 
14
- from . import _debug, _utils
15
- from ._utils import MaybeAwaitable
14
+ from . import _debug
16
15
  from .computer import AsyncComputer, Computer
17
16
  from .exceptions import ModelBehaviorError
18
17
  from .function_schema import DocstringStyle, function_schema
18
+ from .items import RunItem
19
19
  from .logger import logger
20
20
  from .run_context import RunContextWrapper
21
21
  from .tracing import SpanError
22
+ from .util import _error_tracing
23
+ from .util._types import MaybeAwaitable
22
24
 
23
25
  ToolParams = ParamSpec("ToolParams")
24
26
 
@@ -28,6 +30,18 @@ ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParam
28
30
  ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]]
29
31
 
30
32
 
33
+ @dataclass
34
+ class FunctionToolResult:
35
+ tool: FunctionTool
36
+ """The tool that was run."""
37
+
38
+ output: Any
39
+ """The output of the tool."""
40
+
41
+ run_item: RunItem
42
+ """The run item that was produced as a result of the tool call."""
43
+
44
+
31
45
  @dataclass
32
46
  class FunctionTool:
33
47
  """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to
@@ -43,15 +57,15 @@ class FunctionTool:
43
57
  params_json_schema: dict[str, Any]
44
58
  """The JSON schema for the tool's parameters."""
45
59
 
46
- on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[str]]
60
+ on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[Any]]
47
61
  """A function that invokes the tool with the given context and parameters. The params passed
48
62
  are:
49
63
  1. The tool run context.
50
64
  2. The arguments from the LLM, as a JSON string.
51
65
 
52
- You must return a string representation of the tool output. In case of errors, you can either
53
- raise an Exception (which will cause the run to fail) or return a string error message (which
54
- will be sent back to the LLM).
66
+ You must return a string representation of the tool output, or something we can call `str()` on.
67
+ In case of errors, you can either raise an Exception (which will cause the run to fail) or
68
+ return a string error message (which will be sent back to the LLM).
55
69
  """
56
70
 
57
71
  strict_json_schema: bool = True
@@ -137,6 +151,7 @@ def function_tool(
137
151
  docstring_style: DocstringStyle | None = None,
138
152
  use_docstring_info: bool = True,
139
153
  failure_error_function: ToolErrorFunction | None = None,
154
+ strict_mode: bool = True,
140
155
  ) -> FunctionTool:
141
156
  """Overload for usage as @function_tool (no parentheses)."""
142
157
  ...
@@ -150,6 +165,7 @@ def function_tool(
150
165
  docstring_style: DocstringStyle | None = None,
151
166
  use_docstring_info: bool = True,
152
167
  failure_error_function: ToolErrorFunction | None = None,
168
+ strict_mode: bool = True,
153
169
  ) -> Callable[[ToolFunction[...]], FunctionTool]:
154
170
  """Overload for usage as @function_tool(...)."""
155
171
  ...
@@ -163,6 +179,7 @@ def function_tool(
163
179
  docstring_style: DocstringStyle | None = None,
164
180
  use_docstring_info: bool = True,
165
181
  failure_error_function: ToolErrorFunction | None = default_tool_error_function,
182
+ strict_mode: bool = True,
166
183
  ) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
167
184
  """
168
185
  Decorator to create a FunctionTool from a function. By default, we will:
@@ -186,6 +203,11 @@ def function_tool(
186
203
  failure_error_function: If provided, use this function to generate an error message when
187
204
  the tool call fails. The error message is sent to the LLM. If you pass None, then no
188
205
  error message will be sent and instead an Exception will be raised.
206
+ strict_mode: Whether to enable strict mode for the tool's JSON schema. We *strongly*
207
+ recommend setting this to True, as it increases the likelihood of correct JSON input.
208
+ If False, it allows non-strict JSON schemas. For example, if a parameter has a default
209
+ value, it will be optional, additional properties are allowed, etc. See here for more:
210
+ https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas
189
211
  """
190
212
 
191
213
  def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool:
@@ -195,9 +217,10 @@ def function_tool(
195
217
  description_override=description_override,
196
218
  docstring_style=docstring_style,
197
219
  use_docstring_info=use_docstring_info,
220
+ strict_json_schema=strict_mode,
198
221
  )
199
222
 
200
- async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> str:
223
+ async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> Any:
201
224
  try:
202
225
  json_data: dict[str, Any] = json.loads(input) if input else {}
203
226
  except Exception as e:
@@ -244,9 +267,9 @@ def function_tool(
244
267
  else:
245
268
  logger.debug(f"Tool {schema.name} returned {result}")
246
269
 
247
- return str(result)
270
+ return result
248
271
 
249
- async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> str:
272
+ async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> Any:
250
273
  try:
251
274
  return await _on_invoke_tool_impl(ctx, input)
252
275
  except Exception as e:
@@ -257,7 +280,7 @@ def function_tool(
257
280
  if inspect.isawaitable(result):
258
281
  return await result
259
282
 
260
- _utils.attach_error_to_current_span(
283
+ _error_tracing.attach_error_to_current_span(
261
284
  SpanError(
262
285
  message="Error running tool (non-fatal)",
263
286
  data={
@@ -273,6 +296,7 @@ def function_tool(
273
296
  description=schema.description or "",
274
297
  params_json_schema=schema.params_json_schema,
275
298
  on_invoke_tool=_on_invoke_tool,
299
+ strict_json_schema=strict_mode,
276
300
  )
277
301
 
278
302
  # If func is actually a callable, we were used as @function_tool with no parentheses
@@ -10,7 +10,10 @@ from .create import (
10
10
  guardrail_span,
11
11
  handoff_span,
12
12
  response_span,
13
+ speech_group_span,
14
+ speech_span,
13
15
  trace,
16
+ transcription_span,
14
17
  )
15
18
  from .processor_interface import TracingProcessor
16
19
  from .processors import default_exporter, default_processor
@@ -24,6 +27,9 @@ from .span_data import (
24
27
  HandoffSpanData,
25
28
  ResponseSpanData,
26
29
  SpanData,
30
+ SpeechGroupSpanData,
31
+ SpeechSpanData,
32
+ TranscriptionSpanData,
27
33
  )
28
34
  from .spans import Span, SpanError
29
35
  from .traces import Trace
@@ -54,9 +60,15 @@ __all__ = [
54
60
  "GuardrailSpanData",
55
61
  "HandoffSpanData",
56
62
  "ResponseSpanData",
63
+ "SpeechGroupSpanData",
64
+ "SpeechSpanData",
65
+ "TranscriptionSpanData",
57
66
  "TracingProcessor",
58
67
  "gen_trace_id",
59
68
  "gen_span_id",
69
+ "speech_group_span",
70
+ "speech_span",
71
+ "transcription_span",
60
72
  ]
61
73
 
62
74
 
agents/tracing/create.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
3
3
  from collections.abc import Mapping, Sequence
4
4
  from typing import TYPE_CHECKING, Any
5
5
 
6
- from .logger import logger
6
+ from ..logger import logger
7
7
  from .setup import GLOBAL_TRACE_PROVIDER
8
8
  from .span_data import (
9
9
  AgentSpanData,
@@ -13,6 +13,9 @@ from .span_data import (
13
13
  GuardrailSpanData,
14
14
  HandoffSpanData,
15
15
  ResponseSpanData,
16
+ SpeechGroupSpanData,
17
+ SpeechSpanData,
18
+ TranscriptionSpanData,
16
19
  )
17
20
  from .spans import Span
18
21
  from .traces import Trace
@@ -181,7 +184,11 @@ def generation_span(
181
184
  """
182
185
  return GLOBAL_TRACE_PROVIDER.create_span(
183
186
  span_data=GenerationSpanData(
184
- input=input, output=output, model=model, model_config=model_config, usage=usage
187
+ input=input,
188
+ output=output,
189
+ model=model,
190
+ model_config=model_config,
191
+ usage=usage,
185
192
  ),
186
193
  span_id=span_id,
187
194
  parent=parent,
@@ -304,3 +311,116 @@ def guardrail_span(
304
311
  parent=parent,
305
312
  disabled=disabled,
306
313
  )
314
+
315
+
316
+ def transcription_span(
317
+ model: str | None = None,
318
+ input: str | None = None,
319
+ input_format: str | None = "pcm",
320
+ output: str | None = None,
321
+ model_config: Mapping[str, Any] | None = None,
322
+ span_id: str | None = None,
323
+ parent: Trace | Span[Any] | None = None,
324
+ disabled: bool = False,
325
+ ) -> Span[TranscriptionSpanData]:
326
+ """Create a new transcription span. The span will not be started automatically, you should
327
+ either do `with transcription_span() ...` or call `span.start()` + `span.finish()` manually.
328
+
329
+ Args:
330
+ model: The name of the model used for the speech-to-text.
331
+ input: The audio input of the speech-to-text transcription, as a base64 encoded string of
332
+ audio bytes.
333
+ input_format: The format of the audio input (defaults to "pcm").
334
+ output: The output of the speech-to-text transcription.
335
+ model_config: The model configuration (hyperparameters) used.
336
+ span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
337
+ recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
338
+ correctly formatted.
339
+ parent: The parent span or trace. If not provided, we will automatically use the current
340
+ trace/span as the parent.
341
+ disabled: If True, we will return a Span but the Span will not be recorded.
342
+
343
+ Returns:
344
+ The newly created speech-to-text span.
345
+ """
346
+ return GLOBAL_TRACE_PROVIDER.create_span(
347
+ span_data=TranscriptionSpanData(
348
+ input=input,
349
+ input_format=input_format,
350
+ output=output,
351
+ model=model,
352
+ model_config=model_config,
353
+ ),
354
+ span_id=span_id,
355
+ parent=parent,
356
+ disabled=disabled,
357
+ )
358
+
359
+
360
+ def speech_span(
361
+ model: str | None = None,
362
+ input: str | None = None,
363
+ output: str | None = None,
364
+ output_format: str | None = "pcm",
365
+ model_config: Mapping[str, Any] | None = None,
366
+ first_content_at: str | None = None,
367
+ span_id: str | None = None,
368
+ parent: Trace | Span[Any] | None = None,
369
+ disabled: bool = False,
370
+ ) -> Span[SpeechSpanData]:
371
+ """Create a new speech span. The span will not be started automatically, you should either do
372
+ `with speech_span() ...` or call `span.start()` + `span.finish()` manually.
373
+
374
+ Args:
375
+ model: The name of the model used for the text-to-speech.
376
+ input: The text input of the text-to-speech.
377
+ output: The audio output of the text-to-speech as base64 encoded string of PCM audio bytes.
378
+ output_format: The format of the audio output (defaults to "pcm").
379
+ model_config: The model configuration (hyperparameters) used.
380
+ first_content_at: The time of the first byte of the audio output.
381
+ span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
382
+ recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
383
+ correctly formatted.
384
+ parent: The parent span or trace. If not provided, we will automatically use the current
385
+ trace/span as the parent.
386
+ disabled: If True, we will return a Span but the Span will not be recorded.
387
+ """
388
+ return GLOBAL_TRACE_PROVIDER.create_span(
389
+ span_data=SpeechSpanData(
390
+ model=model,
391
+ input=input,
392
+ output=output,
393
+ output_format=output_format,
394
+ model_config=model_config,
395
+ first_content_at=first_content_at,
396
+ ),
397
+ span_id=span_id,
398
+ parent=parent,
399
+ disabled=disabled,
400
+ )
401
+
402
+
403
+ def speech_group_span(
404
+ input: str | None = None,
405
+ span_id: str | None = None,
406
+ parent: Trace | Span[Any] | None = None,
407
+ disabled: bool = False,
408
+ ) -> Span[SpeechGroupSpanData]:
409
+ """Create a new speech group span. The span will not be started automatically, you should
410
+ either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually.
411
+
412
+ Args:
413
+ input: The input text used for the speech request.
414
+ span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
415
+ recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
416
+ correctly formatted.
417
+ parent: The parent span or trace. If not provided, we will automatically use the current
418
+ trace/span as the parent.
419
+ disabled: If True, we will return a Span but the Span will not be recorded.
420
+ """
421
+ return GLOBAL_TRACE_PROVIDER.create_span(
422
+ span_data=SpeechGroupSpanData(input=input),
423
+ span_id=span_id,
424
+ parent=parent,
425
+ disabled=disabled,
426
+ )
@@ -9,7 +9,7 @@ from typing import Any
9
9
 
10
10
  import httpx
11
11
 
12
- from .logger import logger
12
+ from ..logger import logger
13
13
  from .processor_interface import TracingExporter, TracingProcessor
14
14
  from .spans import Span
15
15
  from .traces import Trace
@@ -40,7 +40,7 @@ class BackendSpanExporter(TracingExporter):
40
40
  """
41
41
  Args:
42
42
  api_key: The API key for the "Authorization" header. Defaults to
43
- `os.environ["OPENAI_TRACE_API_KEY"]` if not provided.
43
+ `os.environ["OPENAI_API_KEY"]` if not provided.
44
44
  organization: The OpenAI organization to use. Defaults to
45
45
  `os.environ["OPENAI_ORG_ID"]` if not provided.
46
46
  project: The OpenAI project to use. Defaults to
agents/tracing/scope.py CHANGED
@@ -2,7 +2,7 @@
2
2
  import contextvars
3
3
  from typing import TYPE_CHECKING, Any
4
4
 
5
- from .logger import logger
5
+ from ..logger import logger
6
6
 
7
7
  if TYPE_CHECKING:
8
8
  from .spans import Span
agents/tracing/setup.py CHANGED
@@ -4,8 +4,8 @@ import os
4
4
  import threading
5
5
  from typing import Any
6
6
 
7
+ from ..logger import logger
7
8
  from . import util
8
- from .logger import logger
9
9
  from .processor_interface import TracingProcessor
10
10
  from .scope import Scope
11
11
  from .spans import NoOpSpan, Span, SpanImpl, TSpanData
@@ -51,7 +51,7 @@ class AgentSpanData(SpanData):
51
51
  class FunctionSpanData(SpanData):
52
52
  __slots__ = ("name", "input", "output")
53
53
 
54
- def __init__(self, name: str, input: str | None, output: str | None):
54
+ def __init__(self, name: str, input: str | None, output: Any | None):
55
55
  self.name = name
56
56
  self.input = input
57
57
  self.output = output
@@ -65,7 +65,7 @@ class FunctionSpanData(SpanData):
65
65
  "type": self.type,
66
66
  "name": self.name,
67
67
  "input": self.input,
68
- "output": self.output,
68
+ "output": str(self.output) if self.output else None,
69
69
  }
70
70
 
71
71
 
@@ -186,3 +186,99 @@ class GuardrailSpanData(SpanData):
186
186
  "name": self.name,
187
187
  "triggered": self.triggered,
188
188
  }
189
+
190
+
191
+ class TranscriptionSpanData(SpanData):
192
+ __slots__ = (
193
+ "input",
194
+ "output",
195
+ "model",
196
+ "model_config",
197
+ )
198
+
199
+ def __init__(
200
+ self,
201
+ input: str | None = None,
202
+ input_format: str | None = "pcm",
203
+ output: str | None = None,
204
+ model: str | None = None,
205
+ model_config: Mapping[str, Any] | None = None,
206
+ ):
207
+ self.input = input
208
+ self.input_format = input_format
209
+ self.output = output
210
+ self.model = model
211
+ self.model_config = model_config
212
+
213
+ @property
214
+ def type(self) -> str:
215
+ return "transcription"
216
+
217
+ def export(self) -> dict[str, Any]:
218
+ return {
219
+ "type": self.type,
220
+ "input": {
221
+ "data": self.input or "",
222
+ "format": self.input_format,
223
+ },
224
+ "output": self.output,
225
+ "model": self.model,
226
+ "model_config": self.model_config,
227
+ }
228
+
229
+
230
+ class SpeechSpanData(SpanData):
231
+ __slots__ = ("input", "output", "model", "model_config", "first_byte_at")
232
+
233
+ def __init__(
234
+ self,
235
+ input: str | None = None,
236
+ output: str | None = None,
237
+ output_format: str | None = "pcm",
238
+ model: str | None = None,
239
+ model_config: Mapping[str, Any] | None = None,
240
+ first_content_at: str | None = None,
241
+ ):
242
+ self.input = input
243
+ self.output = output
244
+ self.output_format = output_format
245
+ self.model = model
246
+ self.model_config = model_config
247
+ self.first_content_at = first_content_at
248
+
249
+ @property
250
+ def type(self) -> str:
251
+ return "speech"
252
+
253
+ def export(self) -> dict[str, Any]:
254
+ return {
255
+ "type": self.type,
256
+ "input": self.input,
257
+ "output": {
258
+ "data": self.output or "",
259
+ "format": self.output_format,
260
+ },
261
+ "model": self.model,
262
+ "model_config": self.model_config,
263
+ "first_content_at": self.first_content_at,
264
+ }
265
+
266
+
267
+ class SpeechGroupSpanData(SpanData):
268
+ __slots__ = "input"
269
+
270
+ def __init__(
271
+ self,
272
+ input: str | None = None,
273
+ ):
274
+ self.input = input
275
+
276
+ @property
277
+ def type(self) -> str:
278
+ return "speech-group"
279
+
280
+ def export(self) -> dict[str, Any]:
281
+ return {
282
+ "type": self.type,
283
+ "input": self.input,
284
+ }
agents/tracing/spans.py CHANGED
@@ -6,8 +6,8 @@ from typing import Any, Generic, TypeVar
6
6
 
7
7
  from typing_extensions import TypedDict
8
8
 
9
+ from ..logger import logger
9
10
  from . import util
10
- from .logger import logger
11
11
  from .processor_interface import TracingProcessor
12
12
  from .scope import Scope
13
13
  from .span_data import SpanData
agents/tracing/traces.py CHANGED
@@ -4,8 +4,8 @@ import abc
4
4
  import contextvars
5
5
  from typing import Any
6
6
 
7
+ from ..logger import logger
7
8
  from . import util
8
- from .logger import logger
9
9
  from .processor_interface import TracingProcessor
10
10
  from .scope import Scope
11
11
 
agents/tracing/util.py CHANGED
@@ -15,3 +15,8 @@ def gen_trace_id() -> str:
15
15
  def gen_span_id() -> str:
16
16
  """Generates a new span ID."""
17
17
  return f"span_{uuid.uuid4().hex[:24]}"
18
+
19
+
20
+ def gen_group_id() -> str:
21
+ """Generates a new group ID."""
22
+ return f"group_{uuid.uuid4().hex[:24]}"
File without changes
agents/util/_coro.py ADDED
@@ -0,0 +1,2 @@
1
+ async def noop_coroutine() -> None:
2
+ pass
@@ -0,0 +1,16 @@
1
+ from typing import Any
2
+
3
+ from ..logger import logger
4
+ from ..tracing import Span, SpanError, get_current_span
5
+
6
+
7
+ def attach_error_to_span(span: Span[Any], error: SpanError) -> None:
8
+ span.set_error(error)
9
+
10
+
11
+ def attach_error_to_current_span(error: SpanError) -> None:
12
+ span = get_current_span()
13
+ if span:
14
+ attach_error_to_span(span, error)
15
+ else:
16
+ logger.warning(f"No span to add error {error} to")
agents/util/_json.py ADDED
@@ -0,0 +1,31 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Literal
4
+
5
+ from pydantic import TypeAdapter, ValidationError
6
+ from typing_extensions import TypeVar
7
+
8
+ from ..exceptions import ModelBehaviorError
9
+ from ..tracing import SpanError
10
+ from ._error_tracing import attach_error_to_current_span
11
+
12
+ T = TypeVar("T")
13
+
14
+
15
+ def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:
16
+ partial_setting: bool | Literal["off", "on", "trailing-strings"] = (
17
+ "trailing-strings" if partial else False
18
+ )
19
+ try:
20
+ validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)
21
+ return validated
22
+ except ValidationError as e:
23
+ attach_error_to_current_span(
24
+ SpanError(
25
+ message="Invalid JSON provided",
26
+ data={},
27
+ )
28
+ )
29
+ raise ModelBehaviorError(
30
+ f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
31
+ ) from e