openai-sdk-helpers 0.0.7-py3-none-any.whl → 0.0.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. openai_sdk_helpers/__init__.py +85 -10
  2. openai_sdk_helpers/agent/__init__.py +8 -4
  3. openai_sdk_helpers/agent/base.py +81 -46
  4. openai_sdk_helpers/agent/config.py +6 -4
  5. openai_sdk_helpers/agent/{project_manager.py → coordination.py} +29 -45
  6. openai_sdk_helpers/agent/prompt_utils.py +7 -1
  7. openai_sdk_helpers/agent/runner.py +67 -141
  8. openai_sdk_helpers/agent/search/__init__.py +33 -0
  9. openai_sdk_helpers/agent/search/base.py +297 -0
  10. openai_sdk_helpers/agent/{vector_search.py → search/vector.py} +89 -157
  11. openai_sdk_helpers/agent/{web_search.py → search/web.py} +82 -162
  12. openai_sdk_helpers/agent/summarizer.py +29 -8
  13. openai_sdk_helpers/agent/translator.py +40 -13
  14. openai_sdk_helpers/agent/validation.py +32 -8
  15. openai_sdk_helpers/async_utils.py +132 -0
  16. openai_sdk_helpers/config.py +74 -36
  17. openai_sdk_helpers/context_manager.py +241 -0
  18. openai_sdk_helpers/enums/__init__.py +9 -1
  19. openai_sdk_helpers/enums/base.py +67 -8
  20. openai_sdk_helpers/environment.py +33 -6
  21. openai_sdk_helpers/errors.py +133 -0
  22. openai_sdk_helpers/logging_config.py +105 -0
  23. openai_sdk_helpers/prompt/__init__.py +10 -71
  24. openai_sdk_helpers/prompt/base.py +172 -0
  25. openai_sdk_helpers/response/__init__.py +37 -5
  26. openai_sdk_helpers/response/base.py +427 -189
  27. openai_sdk_helpers/response/config.py +176 -0
  28. openai_sdk_helpers/response/messages.py +104 -40
  29. openai_sdk_helpers/response/runner.py +79 -35
  30. openai_sdk_helpers/response/tool_call.py +75 -12
  31. openai_sdk_helpers/response/vector_store.py +29 -16
  32. openai_sdk_helpers/retry.py +175 -0
  33. openai_sdk_helpers/streamlit_app/__init__.py +30 -0
  34. openai_sdk_helpers/streamlit_app/app.py +345 -0
  35. openai_sdk_helpers/streamlit_app/config.py +502 -0
  36. openai_sdk_helpers/streamlit_app/streamlit_web_search.py +68 -0
  37. openai_sdk_helpers/structure/__init__.py +69 -3
  38. openai_sdk_helpers/structure/agent_blueprint.py +82 -19
  39. openai_sdk_helpers/structure/base.py +245 -91
  40. openai_sdk_helpers/structure/plan/__init__.py +15 -1
  41. openai_sdk_helpers/structure/plan/enum.py +41 -5
  42. openai_sdk_helpers/structure/plan/plan.py +101 -45
  43. openai_sdk_helpers/structure/plan/task.py +38 -6
  44. openai_sdk_helpers/structure/prompt.py +21 -2
  45. openai_sdk_helpers/structure/responses.py +52 -11
  46. openai_sdk_helpers/structure/summary.py +55 -7
  47. openai_sdk_helpers/structure/validation.py +34 -6
  48. openai_sdk_helpers/structure/vector_search.py +132 -18
  49. openai_sdk_helpers/structure/web_search.py +128 -12
  50. openai_sdk_helpers/types.py +57 -0
  51. openai_sdk_helpers/utils/__init__.py +32 -1
  52. openai_sdk_helpers/utils/core.py +200 -32
  53. openai_sdk_helpers/validation.py +302 -0
  54. openai_sdk_helpers/vector_storage/__init__.py +21 -1
  55. openai_sdk_helpers/vector_storage/cleanup.py +25 -13
  56. openai_sdk_helpers/vector_storage/storage.py +124 -66
  57. openai_sdk_helpers/vector_storage/types.py +20 -19
  58. openai_sdk_helpers-0.0.9.dist-info/METADATA +550 -0
  59. openai_sdk_helpers-0.0.9.dist-info/RECORD +66 -0
  60. openai_sdk_helpers-0.0.7.dist-info/METADATA +0 -193
  61. openai_sdk_helpers-0.0.7.dist-info/RECORD +0 -51
  62. {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/WHEEL +0 -0
  63. {openai_sdk_helpers-0.0.7.dist-info → openai_sdk_helpers-0.0.9.dist-info}/licenses/LICENSE +0 -0
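
To reproduce a comparison like this locally from the two published wheels, a minimal standard-library sketch is enough (the wheel filenames below follow the usual wheel naming convention; the diffing approach is illustrative, not the tooling behind this page):

    import difflib
    import zipfile
    from pathlib import Path

    OLD = Path("openai_sdk_helpers-0.0.7-py3-none-any.whl")
    NEW = Path("openai_sdk_helpers-0.0.9-py3-none-any.whl")

    def read_members(wheel: Path) -> dict[str, list[str]]:
        """Map each file inside the wheel (a zip archive) to its decoded text lines."""
        with zipfile.ZipFile(wheel) as zf:
            return {
                name: zf.read(name).decode("utf-8", errors="replace").splitlines(keepends=True)
                for name in zf.namelist()
                if not name.endswith("/")
            }

    old, new = read_members(OLD), read_members(NEW)
    for name in sorted(set(old) | set(new)):
        # unified_diff yields nothing when a member is unchanged between versions.
        diff = difflib.unified_diff(
            old.get(name, []), new.get(name, []),
            fromfile=f"0.0.7/{name}", tofile=f"0.0.9/{name}",
        )
        print("".join(diff), end="")
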
@@ -1,28 +1,28 @@
- """Base structure definitions for shared agent structures."""
+ """Base classes for structured output models.
+
+ This module provides the foundational BaseStructure class and utilities for
+ defining Pydantic-based structured output models with OpenAI-compatible schema
+ generation, validation, and serialization.
+ """
 
  from __future__ import annotations
 
  # Standard library imports
+ import ast
  import inspect
  import json
  import logging
- from abc import ABC, abstractmethod
- from enum import Enum
- from pathlib import Path
  from collections.abc import Mapping, Sequence
  from dataclasses import dataclass
+ from enum import Enum
+ from pathlib import Path
  from typing import (
      Any,
      ClassVar,
-     Dict,
-     List,
-     Optional,
-     Type,
      TypeVar,
-     Union,
+     cast,
      get_args,
      get_origin,
-     cast,
  )
 
  # Third-party imports
@@ -39,43 +39,78 @@ DEFAULT_DATA_PATH: Path | None = None
 
 
  class BaseStructure(BaseModel):
-     """Base class for defining structured output formats for OpenAI Assistants.
+     """Base class for structured output models with schema generation.
+
+     Provides Pydantic-based schema definition and serialization utilities
+     for OpenAI-compatible structured outputs. All structured data types
+     extend this class to ensure consistent validation, serialization, and
+     schema generation across the package.
 
-     This class provides Pydantic-based schema definition and serialization
-     helpers that support structured output formatting.
+     Supports automatic JSON schema generation, prompt formatting, and
+     conversion to/from OpenAI API formats for both Assistant and chat
+     completion APIs.
+
+     Attributes
+     ----------
+     DATA_PATH : Path or None, class attribute
+         Optional location for saving schema files. Set at class level
+         before calling save_schema_to_file.
 
      Methods
      -------
+     get_prompt(add_enum_values=True)
+         Format structured prompt lines into a single output string.
+     get_input_prompt_list(add_enum_values=True)
+         Build a structured prompt including inherited fields.
      assistant_format()
          Build a response format payload for Assistant APIs.
      assistant_tool_definition(name, description)
          Build a function tool definition payload for Assistant APIs.
-     get_prompt(add_enum_values)
-         Format structured prompt lines into a single output string.
-     get_input_prompt_list(add_enum_values)
-         Build a structured prompt including inherited fields.
-     get_schema(force_required)
-         Generate a JSON schema for the structure.
      response_format()
          Build a response format payload for chat completions.
      response_tool_definition(tool_name, tool_description)
          Build a function tool definition payload for chat completions.
-     save_schema_to_file(force_required)
-         Persist the schema to disk within the application data path.
+     get_schema()
+         Generate a JSON schema for the structure.
+     save_schema_to_file()
+         Persist the schema to disk within DATA_PATH.
      to_json()
          Serialize the structure to a JSON-compatible dictionary.
      to_json_file(filepath)
-         Write the serialized payload to ``filepath``.
+         Write the serialized payload to a file.
      from_raw_input(data)
          Construct an instance from raw assistant tool-call arguments.
      format_output(label, value)
          Format a label/value pair for console output.
      schema_overrides()
-         Produce ``Field`` overrides for dynamic schema customisation.
+         Produce Field overrides for dynamic schema customization.
      print()
          Return a string representation of the structure.
      console_print()
          Print the string representation to stdout.
+
+     Examples
+     --------
+     Define a custom structure:
+
+     >>> from openai_sdk_helpers.structure import BaseStructure, spec_field
+     >>> class MyOutput(BaseStructure):
+     ...     title: str = spec_field("title", description="The title")
+     ...     score: float = spec_field("score", description="Quality score")
+
+     Generate JSON schema:
+
+     >>> schema = MyOutput.get_schema()
+     >>> print(schema)
+
+     Create response format for chat completions:
+
+     >>> format_spec = MyOutput.response_format()
+
+     Serialize instance:
+
+     >>> instance = MyOutput(title="Test", score=0.95)
+     >>> json_dict = instance.to_json()
      """
 
      model_config = ConfigDict(
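
The doctest snippets in the class docstring above translate into a small runnable script. A sketch, reusing the docstring's own hypothetical MyOutput model; the required-properties assertion follows the behavior documented for get_schema in a later hunk:

    import json

    from openai_sdk_helpers.structure import BaseStructure, spec_field

    class MyOutput(BaseStructure):
        title: str = spec_field("title", description="The title")
        score: float = spec_field("score", description="Quality score")

    # Schema generation: every property is declared required per the documented behavior.
    schema = MyOutput.get_schema()
    assert set(schema["required"]) == set(schema["properties"])

    # Response format payload for structured chat-completion output.
    format_spec = MyOutput.response_format()

    # Instance serialization to a JSON-compatible dict.
    instance = MyOutput(title="Test", score=0.95)
    print(json.dumps(instance.to_json(), indent=2))
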
@@ -105,18 +140,28 @@ class BaseStructure(BaseModel):
 
      @classmethod
      def _get_all_fields(cls) -> dict[Any, Any]:
-         """Collect all fields, including inherited ones, from the class hierarchy.
+         """Collect all fields from the class hierarchy including inherited ones.
+
+         Traverses the method resolution order (MRO) to gather fields from
+         all parent classes that inherit from BaseModel, ensuring inherited
+         fields are included in schema generation.
+
+         Results are computed once per class and cached for performance.
 
          Returns
          -------
          dict[Any, Any]
-             Mapping of field names to model fields.
+             Mapping of field names to Pydantic ModelField instances.
          """
-         fields = {}
-         for base in reversed(cls.__mro__):  # Traverse inheritance tree
-             if issubclass(base, BaseModel) and hasattr(base, "model_fields"):
-                 fields.update(base.model_fields)  # Merge fields from parent
-         return fields
+         # Use class-level caching for performance
+         cache_attr = "_all_fields_cache"
+         if not hasattr(cls, cache_attr):
+             fields = {}
+             for base in reversed(cls.__mro__):  # Traverse inheritance tree
+                 if issubclass(base, BaseModel) and hasattr(base, "model_fields"):
+                     fields.update(base.model_fields)  # Merge fields from parent
+             setattr(cls, cache_attr, fields)
+         return getattr(cls, cache_attr)
 
      @classmethod
      def _get_field_prompt(
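
Because _get_all_fields merges model_fields across the MRO, fields declared on a parent structure also surface in a subclass's schema and prompts. A brief sketch of that inheritance (both class names here are hypothetical):

    from openai_sdk_helpers.structure import BaseStructure, spec_field

    class ArticleBase(BaseStructure):
        # Field defined on the parent structure
        title: str = spec_field("title", description="Article title")

    class ScoredArticle(ArticleBase):
        # Field added by the subclass
        score: float = spec_field("score", description="Quality score")

    # The inherited "title" and the subclass's "score" should both appear,
    # since field collection walks the MRO and merges parent model_fields.
    print(sorted(ScoredArticle.get_schema()["properties"]))  # ['score', 'title']
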
@@ -189,7 +234,10 @@ class BaseStructure(BaseModel):
 
      @classmethod
      def assistant_tool_definition(cls, name: str, description: str) -> dict:
-         """Build an assistant function tool definition for this structure.
+         """Build an Assistant API function tool definition for this structure.
+
+         Creates a tool definition compatible with the OpenAI Assistant API,
+         using the structure's schema as the function parameters.
 
          Parameters
          ----------
@@ -201,7 +249,14 @@ class BaseStructure(BaseModel):
          Returns
          -------
          dict
-             Assistant tool definition payload.
+             Assistant tool definition payload in OpenAI format.
+
+         Examples
+         --------
+         >>> tool = MyStructure.assistant_tool_definition(
+         ...     "analyze_data",
+         ...     "Analyze the provided data"
+         ... )
          """
          from .responses import assistant_tool_definition
 
@@ -209,12 +264,19 @@ class BaseStructure(BaseModel):
 
      @classmethod
      def assistant_format(cls) -> dict:
-         """Build an assistant response format definition for this structure.
+         """Build an Assistant API response format definition.
+
+         Creates a response format specification that instructs the Assistant
+         API to return structured output matching this structure's schema.
 
          Returns
          -------
          dict
-             Assistant response format definition.
+             Assistant response format definition in OpenAI format.
+
+         Examples
+         --------
+         >>> format_def = MyStructure.assistant_format()
          """
          from .responses import assistant_format
 
@@ -224,6 +286,9 @@ class BaseStructure(BaseModel):
      def response_tool_definition(cls, tool_name: str, tool_description: str) -> dict:
          """Build a chat completion tool definition for this structure.
 
+         Creates a function tool definition compatible with the chat
+         completions API, using the structure's schema as parameters.
+
          Parameters
          ----------
          tool_name : str
@@ -234,7 +299,14 @@ class BaseStructure(BaseModel):
          Returns
          -------
          dict
-             Tool definition payload for chat completions.
+             Tool definition payload for chat completions API.
+
+         Examples
+         --------
+         >>> tool = MyStructure.response_tool_definition(
+         ...     "process_data",
+         ...     "Process the input data"
+         ... )
          """
          from .responses import response_tool_definition
 
@@ -244,10 +316,23 @@ class BaseStructure(BaseModel):
      def response_format(cls) -> ResponseTextConfigParam:
          """Build a chat completion response format for this structure.
 
+         Creates a response format specification that instructs the chat
+         completions API to return structured output matching this
+         structure's schema.
+
          Returns
          -------
          ResponseTextConfigParam
-             Response format definition.
+             Response format definition for chat completions API.
+
+         Examples
+         --------
+         >>> format_spec = MyStructure.response_format()
+         >>> response = client.chat.completions.create(
+         ...     model="gpt-4",
+         ...     messages=[...],
+         ...     response_format=format_spec
+         ... )
          """
          from .responses import response_format
 
@@ -255,25 +340,33 @@ class BaseStructure(BaseModel):
 
      @classmethod
      def get_schema(cls) -> dict[str, Any]:
-         """Generate a JSON schema for the class.
+         """Generate a JSON schema for this structure.
 
-         All object properties are marked as required to produce fully specified
-         schemas. Fields with a default value of ``None`` are treated as nullable
-         and gain an explicit ``null`` entry in the resulting schema.
-
-         Parameters
-         ----------
-         force_required : bool, default=False
-             Retained for compatibility; all schemas declare required properties.
+         Produces a complete JSON schema with all properties marked as
+         required. Fields with default value of None are treated as nullable
+         and include an explicit null type in the schema.
 
          Returns
          -------
          dict[str, Any]
-             JSON schema describing the structure.
+             JSON schema describing the structure in JSON Schema format.
+
+         Notes
+         -----
+         The schema generation automatically:
+         - Marks all object properties as required
+         - Adds null type for fields with None default
+         - Cleans up $ref entries for better compatibility
+         - Recursively processes nested structures
+
+         Examples
+         --------
+         >>> schema = MyStructure.get_schema()
+         >>> print(json.dumps(schema, indent=2))
          """
          schema = cls.model_json_schema()
 
-         def clean_refs(obj):
+         def clean_refs(obj: Any) -> Any:
              if isinstance(obj, dict):
                  if "$ref" in obj:
                      for key in list(obj.keys()):
@@ -286,7 +379,7 @@ class BaseStructure(BaseModel):
                      clean_refs(item)
              return obj
 
-         cleaned_schema = cast(Dict[str, Any], clean_refs(schema))
+         cleaned_schema = cast(dict[str, Any], clean_refs(schema))
 
          def add_required_fields(target: dict[str, Any]) -> None:
              """Ensure every object declares its required properties."""
@@ -335,21 +428,28 @@ class BaseStructure(BaseModel):
 
      @classmethod
      def save_schema_to_file(cls) -> Path:
-         """
-         Save the generated JSON schema to a file.
-
-         The schema is generated using :meth:`get_schema` and saved in the
-         application's data path.
+         """Save the generated JSON schema to a file.
 
-         Parameters
-         ----------
-         force_required : bool, default=False
-             When ``True``, mark all object properties as required.
+         Generates the schema using get_schema and saves it to a JSON file
+         within the DATA_PATH directory. The filename is derived from the
+         class name.
 
          Returns
          -------
          Path
-             Path to the saved schema file.
+             Absolute path to the saved schema file.
+
+         Raises
+         ------
+         RuntimeError
+             If DATA_PATH is not set on the class.
+
+         Examples
+         --------
+         >>> MyStructure.DATA_PATH = Path("./schemas")
+         >>> schema_path = MyStructure.save_schema_to_file()
+         >>> print(schema_path)
+         PosixPath('./schemas/MyStructure_schema.json')
          """
          schema = cls.get_schema()
          if cls.DATA_PATH is None:
@@ -362,17 +462,23 @@ class BaseStructure(BaseModel):
              json.dump(schema, file_handle, indent=2, ensure_ascii=False)
          return file_path
 
-     def to_json(self) -> Dict[str, Any]:
-         """
-         Serialize the Pydantic model instance to a JSON-compatible dictionary.
+     def to_json(self) -> dict[str, Any]:
+         """Serialize the instance to a JSON-compatible dictionary.
 
-         Enum members are converted to their values. Lists and nested dictionaries
-         are recursively processed.
+         Converts the Pydantic model instance to a dictionary suitable for
+         JSON serialization. Enum members are converted to their values,
+         and nested structures are recursively processed.
 
          Returns
          -------
          dict[str, Any]
-             Model instance serialized as a dictionary.
+             Model instance serialized as a dictionary with JSON-compatible types.
+
+         Examples
+         --------
+         >>> instance = MyStructure(title="Test", score=0.95)
+         >>> data = instance.to_json()
+         >>> print(json.dumps(data))
          """
 
          def convert(obj: Any) -> Any:
@@ -395,16 +501,19 @@ class BaseStructure(BaseModel):
              if annotation is None:
                  return False
 
-             origins_to_match = {list, List, Sequence, tuple, set}
+             origins_to_match = {list, Sequence, tuple, set}
 
              origin = get_origin(annotation)
              if origin in origins_to_match or annotation in origins_to_match:
                  return True
 
-             if origin is Union:
+             # Check for Union types (e.g., list[str] | None)
+             if origin is not None:
+                 # Handle Union by checking args
+                 args = get_args(annotation)
                  return any(
                      get_origin(arg) in origins_to_match or arg in origins_to_match
-                     for arg in get_args(annotation)
+                     for arg in args
                  )
              return False
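
The switch from checking "origin is Union" to "origin is not None" matters for modern annotations: PEP 604 unions such as list[str] | None do not report typing.Union as their origin. A quick standard-library check, for illustration only:

    from collections.abc import Sequence
    from typing import Optional, get_args, get_origin

    print(get_origin(list[str] | None))     # types.UnionType (PEP 604), not typing.Union
    print(get_origin(Optional[list[str]]))  # typing.Union
    print(get_args(list[str] | None))       # (list[str], <class 'NoneType'>)
    print(get_origin(list[str]))            # <class 'list'>
    print(get_origin(Sequence[int]))        # <class 'collections.abc.Sequence'>
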
 
@@ -444,21 +553,20 @@ class BaseStructure(BaseModel):
          return filepath
 
      @classmethod
-     def _extract_enum_class(cls, field_type: Any) -> Optional[Type[Enum]]:
-         """
-         Extract an Enum class from a field's type annotation.
+     def _extract_enum_class(cls, field_type: Any) -> type[Enum] | None:
+         """Extract an Enum class from a field's type annotation.
 
-         Handles direct Enum types, List[Enum], and Optional[Enum] (via Union).
+         Handles direct Enum types, list[Enum], and optional Enums.
 
          Parameters
          ----------
-         field_type
+         field_type : Any
              Type annotation of a field.
 
          Returns
          -------
          type[Enum] or None
-             Enum class if found, otherwise ``None``.
+             Enum class if found, otherwise None.
          """
          origin = get_origin(field_type)
          args = get_args(field_type)
@@ -466,13 +574,14 @@ class BaseStructure(BaseModel):
          if inspect.isclass(field_type) and issubclass(field_type, Enum):
              return field_type
          elif (
-             origin in {list, List}
+             origin is list
              and args
              and inspect.isclass(args[0])
              and issubclass(args[0], Enum)
          ):
              return args[0]
-         elif origin is Union:
+         elif origin is not None:
+             # Handle Union types
              for arg in args:
                  enum_cls = cls._extract_enum_class(arg)
                  if enum_cls:
@@ -480,18 +589,18 @@ class BaseStructure(BaseModel):
          return None
 
      @classmethod
-     def _build_enum_field_mapping(cls) -> dict[str, Type[Enum]]:
-         """
-         Build a mapping from field names to their Enum classes.
+     def _build_enum_field_mapping(cls) -> dict[str, type[Enum]]:
+         """Build a mapping from field names to their Enum classes.
 
-         This is used by `from_raw_input` to correctly process enum values.
+         Used by from_raw_input to correctly process enum values from
+         raw API responses.
 
          Returns
          -------
          dict[str, type[Enum]]
              Mapping of field names to Enum types.
          """
-         mapping: dict[str, Type[Enum]] = {}
+         mapping: dict[str, type[Enum]] = {}
 
          for name, model_field in cls.model_fields.items():
              field_type = model_field.annotation
@@ -503,24 +612,27 @@ class BaseStructure(BaseModel):
          return mapping
 
      @classmethod
-     def from_raw_input(cls: Type[T], data: dict) -> T:
-         """
-         Construct an instance of the class from a dictionary of raw input data.
+     def from_raw_input(cls: type[T], data: dict) -> T:
+         """Construct an instance from a dictionary of raw input data.
 
-         This method is particularly useful for converting data received from an
-         OpenAI Assistant (e.g., tool call arguments) into a Pydantic model.
-         It handles the conversion of string values to Enum members for fields
-         typed as Enum or List[Enum]. Warnings are logged for invalid enum values.
+         Particularly useful for converting data from OpenAI API tool calls
+         or assistant outputs into validated structure instances. Handles
+         enum value conversion automatically.
 
          Parameters
          ----------
          data : dict
-             Raw input data payload.
+             Raw input data dictionary from API response.
 
          Returns
          -------
          T
-             Instance populated with the processed data.
+             Validated instance of the structure class.
+
+         Examples
+         --------
+         >>> raw_data = {"title": "Test", "score": 0.95}
+         >>> instance = MyStructure.from_raw_input(raw_data)
          """
          mapping = cls._build_enum_field_mapping()
          clean_data = data.copy()
@@ -570,6 +682,48 @@ class BaseStructure(BaseModel):
 
          return cls(**clean_data)
 
+     @classmethod
+     def from_tool_arguments(cls: type[T], arguments: str) -> T:
+         """Construct an instance from tool call arguments that may not be valid JSON.
+
+         The OpenAI API is expected to return well-formed JSON for tool arguments,
+         but minor formatting issues (such as the use of single quotes) can occur.
+         This helper first tries ``json.loads`` and falls back to
+         ``ast.literal_eval`` for simple cases.
+
+         Parameters
+         ----------
+         arguments
+             Raw argument string from the tool call.
+
+         Returns
+         -------
+         T
+             Instance populated from the parsed arguments.
+
+         Raises
+         ------
+         ValueError
+             If the arguments cannot be parsed as JSON or a Python literal.
+
+         Examples
+         --------
+         >>> instance = MyStructure.from_tool_arguments('{"title": "Test", "score": 0.95}')
+         """
+         try:
+             structured_data = json.loads(arguments)
+
+         except json.JSONDecodeError:
+             try:
+                 structured_data = ast.literal_eval(arguments)
+             except (SyntaxError, ValueError) as exc:
+                 raise ValueError(
+                     f"Invalid JSON arguments: {arguments}. "
+                     f"Expected valid JSON or Python literal."
+                 ) from exc
+         return cls.from_raw_input(structured_data)
+
      @staticmethod
      def format_output(label: str, value: Any) -> str:
          """
@@ -596,7 +750,7 @@ class BaseStructure(BaseModel):
          return f"- {label}: {str(value)}"
 
      @classmethod
-     def schema_overrides(cls) -> Dict[str, Any]:
+     def schema_overrides(cls) -> dict[str, Any]:
          """
          Generate Pydantic ``Field`` overrides.
 
@@ -688,7 +842,7 @@ def spec_field(
      Any
          Pydantic ``Field`` configured with a default title and null behavior.
      """
-     field_kwargs: Dict[str, Any] = {"title": name.replace("_", " ").title()}
+     field_kwargs: dict[str, Any] = {"title": name.replace("_", " ").title()}
      field_kwargs.update(overrides)
      base_description = field_kwargs.pop("description", description)
 
@@ -1,4 +1,18 @@
- """Structured output models for agent tasks and plans."""
+ """Structured output models for agent tasks and plans.
+
+ This package provides Pydantic models for representing agent execution plans,
+ including task definitions, agent type enumerations, and plan structures with
+ sequential execution support.
+
+ Classes
+ -------
+ PlanStructure
+     Ordered list of agent tasks with execution capabilities.
+ TaskStructure
+     Individual agent task with status tracking and results.
+ AgentEnum
+     Enumeration of available agent types.
+ """
 
  from __future__ import annotations
 
@@ -1,19 +1,55 @@
- """Agent task enumeration definitions."""
+ """Agent task enumeration definitions.
 
- from __future__ import annotations
+ This module defines enumerations for agent types that can be assigned to
+ tasks within an execution plan.
+ """
 
- from typing import Any
+ from __future__ import annotations
 
  from ...enums.base import CrosswalkJSONEnum
 
 
  class AgentEnum(CrosswalkJSONEnum):
-     """Auto-generated enumeration for AgentEnum.
+     """Enumeration of available agent types for task execution.
+
+     Defines all supported agent types that can be assigned to tasks in
+     a plan. Each enum value corresponds to a specific agent implementation.
+
+     Attributes
+     ----------
+     WEB_SEARCH : str
+         Web search agent for retrieving information from the internet.
+     VECTOR_SEARCH : str
+         Vector search agent for semantic similarity search.
+     DATA_ANALYST : str
+         Data analysis agent for processing and analyzing data.
+     SUMMARIZER : str
+         Summarization agent for condensing information.
+     TRANSLATOR : str
+         Translation agent for language conversion.
+     VALIDATOR : str
+         Validation agent for checking constraints and guardrails.
+     PLANNER : str
+         Meta-planning agent for generating execution plans.
+     DESIGNER : str
+         Agent design agent for creating agent specifications.
+     BUILDER : str
+         Agent builder for constructing agent implementations.
+     EVALUATOR : str
+         Evaluation agent for assessing outputs and performance.
+     RELEASE_MANAGER : str
+         Release management agent for deployment coordination.
 
      Methods
      -------
      CROSSWALK()
          Return the raw crosswalk data for this enum.
+
+     Examples
+     --------
+     >>> agent_type = AgentEnum.WEB_SEARCH
+     >>> print(agent_type.value)
+     'WebAgentSearch'
      """
 
      WEB_SEARCH = "WebAgentSearch"
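
Putting the plan package docstring and this enum together: the import below is an assumption inferred from the file layout (openai_sdk_helpers/structure/plan/), not a documented public API, so treat it as a sketch.

    # Assumed re-exports; adjust to the actual package surface if they differ.
    from openai_sdk_helpers.structure.plan import AgentEnum, PlanStructure, TaskStructure

    # Enum members map task types to agent implementations, e.g. WEB_SEARCH -> "WebAgentSearch".
    print([(member.name, member.value) for member in AgentEnum][:3])
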
@@ -29,7 +65,7 @@ class AgentEnum(CrosswalkJSONEnum):
      RELEASE_MANAGER = "ReleaseManager"
 
      @classmethod
-     def CROSSWALK(cls) -> dict[str, dict[str, Any]]:
+     def CROSSWALK(cls) -> dict[str, dict[str, str]]:
          """Return the raw crosswalk data for this enum.
 
          Returns