mirascope 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. mirascope/_stubs.py +39 -18
  2. mirascope/_utils.py +34 -0
  3. mirascope/api/_generated/__init__.py +4 -0
  4. mirascope/api/_generated/organization_invitations/client.py +2 -2
  5. mirascope/api/_generated/organization_invitations/raw_client.py +2 -2
  6. mirascope/api/_generated/project_memberships/__init__.py +4 -0
  7. mirascope/api/_generated/project_memberships/client.py +91 -0
  8. mirascope/api/_generated/project_memberships/raw_client.py +239 -0
  9. mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
  10. mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
  11. mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
  12. mirascope/api/_generated/reference.md +73 -1
  13. mirascope/llm/__init__.py +19 -0
  14. mirascope/llm/calls/calls.py +28 -21
  15. mirascope/llm/calls/decorator.py +17 -24
  16. mirascope/llm/formatting/__init__.py +2 -2
  17. mirascope/llm/formatting/format.py +2 -4
  18. mirascope/llm/formatting/types.py +19 -2
  19. mirascope/llm/models/models.py +66 -146
  20. mirascope/llm/prompts/decorator.py +5 -16
  21. mirascope/llm/prompts/prompts.py +35 -38
  22. mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
  23. mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
  24. mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
  25. mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
  26. mirascope/llm/providers/anthropic/beta_provider.py +33 -69
  27. mirascope/llm/providers/anthropic/provider.py +52 -91
  28. mirascope/llm/providers/base/_utils.py +4 -9
  29. mirascope/llm/providers/base/base_provider.py +89 -205
  30. mirascope/llm/providers/google/_utils/decode.py +51 -1
  31. mirascope/llm/providers/google/_utils/encode.py +38 -21
  32. mirascope/llm/providers/google/provider.py +33 -69
  33. mirascope/llm/providers/mirascope/provider.py +25 -61
  34. mirascope/llm/providers/mlx/encoding/base.py +3 -6
  35. mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
  36. mirascope/llm/providers/mlx/mlx.py +9 -21
  37. mirascope/llm/providers/mlx/provider.py +33 -69
  38. mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
  39. mirascope/llm/providers/openai/completions/base_provider.py +34 -75
  40. mirascope/llm/providers/openai/provider.py +25 -61
  41. mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
  42. mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
  43. mirascope/llm/providers/openai/responses/provider.py +34 -75
  44. mirascope/llm/responses/__init__.py +2 -1
  45. mirascope/llm/responses/base_stream_response.py +4 -0
  46. mirascope/llm/responses/response.py +8 -12
  47. mirascope/llm/responses/stream_response.py +8 -12
  48. mirascope/llm/responses/usage.py +44 -0
  49. mirascope/llm/tools/__init__.py +24 -0
  50. mirascope/llm/tools/provider_tools.py +18 -0
  51. mirascope/llm/tools/tool_schema.py +11 -4
  52. mirascope/llm/tools/toolkit.py +24 -6
  53. mirascope/llm/tools/types.py +112 -0
  54. mirascope/llm/tools/web_search_tool.py +32 -0
  55. mirascope/ops/__init__.py +19 -1
  56. mirascope/ops/_internal/closure.py +4 -1
  57. mirascope/ops/_internal/exporters/exporters.py +13 -46
  58. mirascope/ops/_internal/exporters/utils.py +37 -0
  59. mirascope/ops/_internal/instrumentation/__init__.py +20 -0
  60. mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
  61. mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
  62. mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
  63. mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
  64. mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
  65. mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
  66. mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
  67. mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
  68. mirascope/ops/_internal/traced_calls.py +14 -0
  69. mirascope/ops/_internal/traced_functions.py +7 -2
  70. mirascope/ops/_internal/utils.py +12 -4
  71. mirascope/ops/_internal/versioned_functions.py +1 -1
  72. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
  73. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/RECORD +75 -64
  74. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
  75. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -1682,7 +1682,7 @@ client.organization_invitations.create(
  <dl>
  <dd>
 
- **recipient_email:** `str` — a string matching the pattern ^[^\s@]+@[^\s@]+\.[^\s@]+$
+ **recipient_email:** `str` — a string matching the pattern ^[^ \t\n\r\f\v@]+@[^ \t\n\r\f\v@]+[.][^ \t\n\r\f\v@]+$
 
  </dd>
  </dl>
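
The change expands `\s` into the explicit whitespace class ` \t\n\r\f\v` and replaces the escaped dot with `[.]`, presumably for portability across regex engines that interpret `\s` differently. A quick sanity check of the new pattern, as a sketch in Python (the pattern itself is copied verbatim from the line above):

```python
import re

# Pattern copied verbatim from the updated reference.md entry.
EMAIL = re.compile(r"^[^ \t\n\r\f\v@]+@[^ \t\n\r\f\v@]+[.][^ \t\n\r\f\v@]+$")

assert EMAIL.match("user@example.com")
assert not EMAIL.match("user name@example.com")  # whitespace rejected
assert not EMAIL.match("user@example")           # requires a dot in the host
```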
@@ -2616,6 +2616,78 @@ client.project_memberships.create(
  </dl>
 
 
+ </dd>
+ </dl>
+ </details>
+
+ <details><summary><code>client.project_memberships.<a href="src/mirascope/project_memberships/client.py">get</a>(...)</code></summary>
+ <dl>
+ <dd>
+
+ #### 🔌 Usage
+
+ <dl>
+ <dd>
+
+ <dl>
+ <dd>
+
+ ```python
+ from mirascope.api._generated import Mirascope
+
+ client = Mirascope()
+ client.project_memberships.get(
+     organization_id="organizationId",
+     project_id="projectId",
+     member_id="memberId",
+ )
+
+ ```
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+
+ #### ⚙️ Parameters
+
+ <dl>
+ <dd>
+
+ <dl>
+ <dd>
+
+ **organization_id:** `str`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **project_id:** `str`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **member_id:** `str`
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+ </dd>
+ </dl>
+ </dd>
+ </dl>
+
+
  </dd>
  </dl>
  </details>
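
The new `project_memberships/types` module (files 10–11 in the list above) adds `ProjectMembershipsGetResponse` and `ProjectMembershipsGetResponseRole`, so the return value presumably carries the member's role. A hedged sketch; the `role` attribute name is inferred from those type names, not confirmed by the generated reference:

```python
# Sketch only: the field name is inferred from the new
# project_memberships_get_response_role.py module added in this release.
membership = client.project_memberships.get(
    organization_id="organizationId",
    project_id="projectId",
    member_id="memberId",
)
print(membership.role)  # presumably a ProjectMembershipsGetResponseRole value
```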
mirascope/llm/__init__.py CHANGED
@@ -75,6 +75,7 @@ from .exceptions import (
  )
  from .formatting import (
      Format,
+     FormatSpec,
      FormattableT,
      FormattingMode,
      OutputParser,
@@ -130,6 +131,7 @@ from .responses import (
      ContextResponse,
      ContextStreamResponse,
      FinishReason,
+     ProviderToolUsage,
      RawMessageChunk,
      Response,
      RootResponse,
@@ -144,18 +146,25 @@ from .responses import (
  )
  from .tools import (
      AnyToolFn,
+     AnyTools,
      AnyToolSchema,
      AsyncContextTool,
      AsyncContextToolkit,
+     AsyncContextTools,
      AsyncTool,
      AsyncToolkit,
+     AsyncTools,
      BaseToolkit,
      ContextTool,
      ContextToolkit,
+     ContextTools,
+     ProviderTool,
      Tool,
      Toolkit,
      ToolkitT,
+     Tools,
      ToolSchema,
+     WebSearchTool,
      tool,
  )
  from .types import Jsonable
@@ -165,6 +174,7 @@ __all__ = [
      "AnyResponse",
      "AnyToolFn",
      "AnyToolSchema",
+     "AnyTools",
      "AssistantContent",
      "AssistantContentChunk",
      "AssistantContentPart",
@@ -177,6 +187,7 @@ __all__ = [
      "AsyncContextStreamResponse",
      "AsyncContextTool",
      "AsyncContextToolkit",
+     "AsyncContextTools",
      "AsyncPrompt",
      "AsyncResponse",
      "AsyncStream",
@@ -186,6 +197,7 @@ __all__ = [
      "AsyncTool",
      "AsyncToolCallStream",
      "AsyncToolkit",
+     "AsyncTools",
      "Audio",
      "AuthenticationError",
      "BadRequestError",
@@ -203,12 +215,14 @@ __all__ = [
      "ContextStreamResponse",
      "ContextTool",
      "ContextToolkit",
+     "ContextTools",
      "DepsT",
      "Document",
      "Error",
      "FeatureNotSupportedError",
      "FinishReason",
      "Format",
+     "FormatSpec",
      "FormattableT",
      "FormattingMode",
      "Image",
@@ -229,6 +243,8 @@ __all__ = [
      "Provider",
      "ProviderError",
      "ProviderId",
+     "ProviderTool",
+     "ProviderToolUsage",
      "RateLimitError",
      "RawMessageChunk",
      "Response",
@@ -266,12 +282,15 @@ __all__ = [
      "ToolSchema",
      "Toolkit",
      "ToolkitT",
+     "Tools",
      "URLImageSource",
      "Usage",
      "UsageDeltaChunk",
      "UserContent",
      "UserContentPart",
      "UserMessage",
+     "WebSearchTool",
+     "WebSearchTool",
      "call",
      "calls",
      "content",
mirascope/llm/calls/calls.py CHANGED
@@ -1,8 +1,10 @@
  """The Call module for generating responses using LLMs."""
 
- from dataclasses import dataclass
- from typing import Generic, TypeVar, overload
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from typing import Any, Generic, TypeVar, overload
 
+ from ..._utils import copy_function_metadata
  from ..context import Context, DepsT
  from ..formatting import FormattableT
  from ..models import Model, use_model
@@ -12,6 +14,7 @@ from ..prompts import (
      ContextPrompt,
      Prompt,
  )
+ from ..prompts.prompts import BasePrompt
  from ..responses import (
      AsyncContextResponse,
      AsyncContextStreamResponse,
@@ -24,24 +27,35 @@ from ..responses import (
  )
  from ..types import P
 
- CallT = TypeVar("CallT", bound="BaseCall")
+ PromptT = TypeVar("PromptT", bound=BasePrompt[Callable[..., Any]])
+ CallT = TypeVar("CallT", bound="BaseCall[Any]")
 
 
- @dataclass
- class BaseCall:
+ @dataclass(kw_only=True)
+ class BaseCall(Generic[PromptT]):
      """Base class for all Call types with shared model functionality."""
 
      default_model: Model
      """The default model that will be used if no model is set in context."""
 
+     prompt: PromptT
+     """The underlying Prompt instance that generates messages with tools and format."""
+
+     __name__: str = field(init=False, repr=False, default="")
+     """The name of the underlying function (preserved for decorator stacking)."""
+
      @property
      def model(self) -> Model:
          """The model used for generating responses. May be overwritten via `with llm.model(...)`."""
          return use_model(self.default_model)
 
+     def __post_init__(self) -> None:
+         """Preserve standard function attributes for decorator stacking."""
+         copy_function_metadata(self, self.prompt.fn)
+
 
  @dataclass
- class Call(BaseCall, Generic[P, FormattableT]):
+ class Call(BaseCall[Prompt[P, FormattableT]], Generic[P, FormattableT]):
      """A call that directly generates LLM responses without requiring a model argument.
 
      Created by decorating a `MessageTemplate` with `llm.call`. The decorated function
@@ -53,9 +67,6 @@ class Call(BaseCall, Generic[P, FormattableT]):
      The model can be overridden at runtime using `with llm.model(...)` context manager.
      """
 
-     prompt: Prompt[P, FormattableT]
-     """The underlying Prompt instance that generates messages with tools and format."""
-
      @overload
      def __call__(
          self: "Call[P, None]", *args: P.args, **kwargs: P.kwargs
@@ -104,7 +115,7 @@ class Call(BaseCall, Generic[P, FormattableT]):
 
 
  @dataclass
- class AsyncCall(BaseCall, Generic[P, FormattableT]):
+ class AsyncCall(BaseCall[AsyncPrompt[P, FormattableT]], Generic[P, FormattableT]):
      """An async call that directly generates LLM responses without requiring a model argument.
 
      Created by decorating an async `MessageTemplate` with `llm.call`. The decorated async
@@ -116,9 +127,6 @@ class AsyncCall(BaseCall, Generic[P, FormattableT]):
      The model can be overridden at runtime using `with llm.model(...)` context manager.
      """
 
-     prompt: AsyncPrompt[P, FormattableT]
-     """The underlying AsyncPrompt instance that generates messages with tools and format."""
-
      @overload
      async def __call__(
          self: "AsyncCall[P, None]", *args: P.args, **kwargs: P.kwargs
@@ -169,7 +177,9 @@ class AsyncCall(BaseCall, Generic[P, FormattableT]):
 
 
  @dataclass
- class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+ class ContextCall(
+     BaseCall[ContextPrompt[P, DepsT, FormattableT]], Generic[P, DepsT, FormattableT]
+ ):
      """A context-aware call that directly generates LLM responses without requiring a model argument.
 
      Created by decorating a `ContextMessageTemplate` with `llm.call`. The decorated function
@@ -182,9 +192,6 @@ class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
      The model can be overridden at runtime using `with llm.model(...)` context manager.
      """
 
-     prompt: ContextPrompt[P, DepsT, FormattableT]
-     """The underlying ContextPrompt instance that generates messages with tools and format."""
-
      @overload
      def __call__(
          self: "ContextCall[P, DepsT, None]",
@@ -255,7 +262,10 @@ class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
 
 
  @dataclass
- class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+ class AsyncContextCall(
+     BaseCall[AsyncContextPrompt[P, DepsT, FormattableT]],
+     Generic[P, DepsT, FormattableT],
+ ):
      """An async context-aware call that directly generates LLM responses without requiring a model argument.
 
      Created by decorating an async `ContextMessageTemplate` with `llm.call`. The decorated async
@@ -268,9 +278,6 @@ class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
      The model can be overridden at runtime using `with llm.model(...)` context manager.
      """
 
-     prompt: AsyncContextPrompt[P, DepsT, FormattableT]
-     """The underlying AsyncContextPrompt instance that generates messages with tools and format."""
-
      @overload
      async def __call__(
          self: "AsyncContextCall[P, DepsT, None]",
mirascope/llm/calls/decorator.py CHANGED
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Generic, cast, overload
  from typing_extensions import Unpack
 
  from ..context import DepsT
- from ..formatting import Format, FormattableT, OutputParser
+ from ..formatting import FormatSpec, FormattableT
  from ..models import Model
  from ..prompts import (
      AsyncContextMessageTemplate,
@@ -29,6 +29,7 @@ from ..tools import (
      AsyncToolkit,
      ContextTool,
      ContextToolkit,
+     ProviderTool,
      Tool,
      Toolkit,
      ToolT,
@@ -58,12 +59,10 @@ class CallDecorator(Generic[ToolT, FormattableT]):
      model: Model
      """The default model to use with this call. May be overridden."""
 
-     tools: Sequence[ToolT] | None
+     tools: Sequence[ToolT | ProviderTool] | None
      """The tools that are included in the prompt, if any."""
 
-     format: (
-         type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-     )
+     format: FormatSpec[FormattableT] | None
      """The structured output format of the prompt, if any."""
 
      @overload
@@ -114,7 +113,8 @@ class CallDecorator(Generic[ToolT, FormattableT]):
 
          if is_context and is_async:
              tools = cast(
-                 Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
+                 Sequence[AsyncTool | AsyncContextTool[DepsT] | ProviderTool] | None,
+                 self.tools,
              )
              prompt = AsyncContextPrompt(
                  fn=fn,
@@ -126,7 +126,9 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                  default_model=self.model,
              )
          elif is_context:
-             tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
+             tools = cast(
+                 Sequence[Tool | ContextTool[DepsT] | ProviderTool] | None, self.tools
+             )
              prompt = ContextPrompt(
                  fn=fn,
                  toolkit=ContextToolkit(tools=tools),
@@ -137,7 +139,7 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                  default_model=self.model,
              )
          elif is_async:
-             tools = cast(Sequence[AsyncTool] | None, self.tools)
+             tools = cast(Sequence[AsyncTool | ProviderTool] | None, self.tools)
              prompt = AsyncPrompt(
                  fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
              )
@@ -146,7 +148,7 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                  default_model=self.model,
              )
          else:
-             tools = cast(Sequence[Tool] | None, self.tools)
+             tools = cast(Sequence[Tool | ProviderTool] | None, self.tools)
              prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
              return Call(
                  prompt=prompt,
@@ -158,11 +160,8 @@ class CallDecorator(Generic[ToolT, FormattableT]):
  def call(
      model: ModelId,
      *,
-     tools: Sequence[ToolT] | None = None,
-     format: type[FormattableT]
-     | Format[FormattableT]
-     | OutputParser[FormattableT]
-     | None = None,
+     tools: Sequence[ToolT | ProviderTool] | None = None,
+     format: FormatSpec[FormattableT] | None = None,
      **params: Unpack[Params],
  ) -> CallDecorator[ToolT, FormattableT]:
      """Decorator for converting prompt functions into LLM calls.
@@ -176,11 +175,8 @@ def call(
  def call(
      model: Model,
      *,
-     tools: Sequence[ToolT] | None = None,
-     format: type[FormattableT]
-     | Format[FormattableT]
-     | OutputParser[FormattableT]
-     | None = None,
+     tools: Sequence[ToolT | ProviderTool] | None = None,
+     format: FormatSpec[FormattableT] | None = None,
  ) -> CallDecorator[ToolT, FormattableT]:
      """Decorator for converting prompt functions into LLM calls.
 
@@ -192,11 +188,8 @@ def call(
  def call(
      model: ModelId | Model,
      *,
-     tools: Sequence[ToolT] | None = None,
-     format: type[FormattableT]
-     | Format[FormattableT]
-     | OutputParser[FormattableT]
-     | None = None,
+     tools: Sequence[ToolT | ProviderTool] | None = None,
+     format: FormatSpec[FormattableT] | None = None,
      **params: Unpack[Params],
  ) -> CallDecorator[ToolT, FormattableT]:
      """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
mirascope/llm/formatting/__init__.py CHANGED
@@ -18,11 +18,11 @@ from .primitives import (
      create_wrapper_model,
      is_primitive_type,
  )
- from .types import FormattableT, FormattingMode
+ from .types import FormatSpec, FormattableT, FormattingMode
 
  __all__ = [
      "Format",
-     "FormattableT",
+     "FormatSpec",
      "FormattableT",
      "FormattingMode",
      "FromCallArgs",
mirascope/llm/formatting/format.py CHANGED
@@ -11,7 +11,7 @@ from ..tools import FORMAT_TOOL_NAME, ToolFn, ToolParameterSchema, ToolSchema
  from ..types import NoneType
  from .output_parser import OutputParser, is_output_parser
  from .primitives import create_wrapper_model, is_primitive_type
- from .types import FormattableT, FormattingMode, HasFormattingInstructions
+ from .types import FormatSpec, FormattableT, FormattingMode, HasFormattingInstructions
 
  TOOL_MODE_INSTRUCTIONS = f"""Always respond to the user's query using the {FORMAT_TOOL_NAME} tool for structured output."""
@@ -267,9 +267,7 @@ def format(
 
 
  def resolve_format(
-     formattable: (
-         type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-     ),
+     formattable: FormatSpec[FormattableT] | None,
      default_mode: FormattingMode,
  ) -> Format[FormattableT] | None:
      """Resolve a `Format` (or None) from a possible `Format`, Formattable, or `OutputParser`.
mirascope/llm/formatting/types.py CHANGED
@@ -1,15 +1,32 @@
  """Type for the formatting module."""
 
- from typing import Literal, Protocol, runtime_checkable
- from typing_extensions import TypeVar
+ from typing import TYPE_CHECKING, Literal, Protocol, runtime_checkable
+ from typing_extensions import TypeAliasType, TypeVar
 
  from pydantic import BaseModel
 
  from .primitives import PrimitiveType
 
+ if TYPE_CHECKING:
+     from .format import Format
+     from .output_parser import OutputParser
+
  FormattableT = TypeVar(
      "FormattableT", bound=BaseModel | PrimitiveType | None, default=None
  )
+
+ FormatSpec = TypeAliasType(
+     "FormatSpec",
+     "type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT]",
+     type_params=(FormattableT,),
+ )
+ """Type alias for format parameter types.
+
+ A FormatSpec can be:
+ - A type (class) that represents the format schema (e.g., a Pydantic BaseModel)
+ - A Format wrapper that includes mode and other metadata
+ - An OutputParser for custom parsing logic
+ """
  """Type variable for structured response format types.
 
  This TypeVar represents the type of structured output format that LLM responses
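
As the new docstring spells out, anything accepted by `format=` is now typed as `FormatSpec`. A sketch of the plain-type form, the most common of the three; the decorator usage and model id are assumptions, and a `Format` wrapper or `OutputParser` would be passed the same way:

```python
from pydantic import BaseModel

from mirascope import llm


class Book(BaseModel):
    title: str
    author: str


# format= accepts a FormatSpec: here, a plain Pydantic model type.
@llm.call("openai:gpt-4o-mini", format=Book)
def recommend(genre: str) -> str:
    return f"Recommend a {genre} book."
```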