mirascope 2.0.2__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. mirascope/_stubs.py +39 -18
  2. mirascope/api/_generated/__init__.py +4 -0
  3. mirascope/api/_generated/project_memberships/__init__.py +4 -0
  4. mirascope/api/_generated/project_memberships/client.py +91 -0
  5. mirascope/api/_generated/project_memberships/raw_client.py +239 -0
  6. mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
  7. mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
  8. mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
  9. mirascope/api/_generated/reference.md +72 -0
  10. mirascope/llm/__init__.py +19 -0
  11. mirascope/llm/calls/decorator.py +17 -24
  12. mirascope/llm/formatting/__init__.py +2 -2
  13. mirascope/llm/formatting/format.py +2 -4
  14. mirascope/llm/formatting/types.py +19 -2
  15. mirascope/llm/models/models.py +66 -146
  16. mirascope/llm/prompts/decorator.py +5 -16
  17. mirascope/llm/prompts/prompts.py +5 -13
  18. mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
  19. mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
  20. mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
  21. mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
  22. mirascope/llm/providers/anthropic/beta_provider.py +33 -69
  23. mirascope/llm/providers/anthropic/provider.py +52 -91
  24. mirascope/llm/providers/base/_utils.py +4 -9
  25. mirascope/llm/providers/base/base_provider.py +89 -205
  26. mirascope/llm/providers/google/_utils/decode.py +51 -1
  27. mirascope/llm/providers/google/_utils/encode.py +38 -21
  28. mirascope/llm/providers/google/provider.py +33 -69
  29. mirascope/llm/providers/mirascope/provider.py +25 -61
  30. mirascope/llm/providers/mlx/encoding/base.py +3 -6
  31. mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
  32. mirascope/llm/providers/mlx/mlx.py +9 -21
  33. mirascope/llm/providers/mlx/provider.py +33 -69
  34. mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
  35. mirascope/llm/providers/openai/completions/base_provider.py +34 -75
  36. mirascope/llm/providers/openai/provider.py +25 -61
  37. mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
  38. mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
  39. mirascope/llm/providers/openai/responses/provider.py +34 -75
  40. mirascope/llm/responses/__init__.py +2 -1
  41. mirascope/llm/responses/base_stream_response.py +4 -0
  42. mirascope/llm/responses/response.py +8 -12
  43. mirascope/llm/responses/stream_response.py +8 -12
  44. mirascope/llm/responses/usage.py +44 -0
  45. mirascope/llm/tools/__init__.py +24 -0
  46. mirascope/llm/tools/provider_tools.py +18 -0
  47. mirascope/llm/tools/tool_schema.py +4 -2
  48. mirascope/llm/tools/toolkit.py +24 -6
  49. mirascope/llm/tools/types.py +112 -0
  50. mirascope/llm/tools/web_search_tool.py +32 -0
  51. mirascope/ops/__init__.py +19 -1
  52. mirascope/ops/_internal/instrumentation/__init__.py +20 -0
  53. mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
  54. mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
  55. mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
  56. mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
  57. mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
  58. mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
  59. mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
  60. mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
  61. {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
  62. {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/RECORD +64 -54
  63. {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
  64. {mirascope-2.0.2.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/__init__.py CHANGED
@@ -75,6 +75,7 @@ from .exceptions import (
 )
 from .formatting import (
     Format,
+    FormatSpec,
     FormattableT,
     FormattingMode,
     OutputParser,
@@ -130,6 +131,7 @@ from .responses import (
     ContextResponse,
     ContextStreamResponse,
     FinishReason,
+    ProviderToolUsage,
     RawMessageChunk,
     Response,
     RootResponse,
@@ -144,18 +146,25 @@ from .responses import (
 )
 from .tools import (
     AnyToolFn,
+    AnyTools,
     AnyToolSchema,
     AsyncContextTool,
     AsyncContextToolkit,
+    AsyncContextTools,
     AsyncTool,
     AsyncToolkit,
+    AsyncTools,
     BaseToolkit,
     ContextTool,
     ContextToolkit,
+    ContextTools,
+    ProviderTool,
     Tool,
     Toolkit,
     ToolkitT,
+    Tools,
     ToolSchema,
+    WebSearchTool,
     tool,
 )
 from .types import Jsonable
@@ -165,6 +174,7 @@ __all__ = [
     "AnyResponse",
     "AnyToolFn",
     "AnyToolSchema",
+    "AnyTools",
     "AssistantContent",
     "AssistantContentChunk",
     "AssistantContentPart",
@@ -177,6 +187,7 @@ __all__ = [
     "AsyncContextStreamResponse",
     "AsyncContextTool",
     "AsyncContextToolkit",
+    "AsyncContextTools",
     "AsyncPrompt",
     "AsyncResponse",
     "AsyncStream",
@@ -186,6 +197,7 @@ __all__ = [
     "AsyncTool",
     "AsyncToolCallStream",
     "AsyncToolkit",
+    "AsyncTools",
     "Audio",
     "AuthenticationError",
     "BadRequestError",
@@ -203,12 +215,14 @@ __all__ = [
     "ContextStreamResponse",
     "ContextTool",
     "ContextToolkit",
+    "ContextTools",
     "DepsT",
     "Document",
     "Error",
     "FeatureNotSupportedError",
     "FinishReason",
     "Format",
+    "FormatSpec",
     "FormattableT",
     "FormattingMode",
     "Image",
@@ -229,6 +243,8 @@ __all__ = [
     "Provider",
     "ProviderError",
     "ProviderId",
+    "ProviderTool",
+    "ProviderToolUsage",
     "RateLimitError",
     "RawMessageChunk",
     "Response",
@@ -266,12 +282,15 @@ __all__ = [
     "ToolSchema",
     "Toolkit",
     "ToolkitT",
+    "Tools",
     "URLImageSource",
     "Usage",
     "UsageDeltaChunk",
     "UserContent",
     "UserContentPart",
     "UserMessage",
+    "WebSearchTool",
+    "WebSearchTool",
     "call",
     "calls",
     "content",
mirascope/llm/calls/decorator.py CHANGED
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Generic, cast, overload
 from typing_extensions import Unpack
 
 from ..context import DepsT
-from ..formatting import Format, FormattableT, OutputParser
+from ..formatting import FormatSpec, FormattableT
 from ..models import Model
 from ..prompts import (
     AsyncContextMessageTemplate,
@@ -29,6 +29,7 @@ from ..tools import (
     AsyncToolkit,
     ContextTool,
     ContextToolkit,
+    ProviderTool,
     Tool,
     Toolkit,
     ToolT,
@@ -58,12 +59,10 @@ class CallDecorator(Generic[ToolT, FormattableT]):
     model: Model
     """The default model to use with this call. May be overridden."""
 
-    tools: Sequence[ToolT] | None
+    tools: Sequence[ToolT | ProviderTool] | None
     """The tools that are included in the prompt, if any."""
 
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
+    format: FormatSpec[FormattableT] | None
     """The structured output format off the prompt, if any."""
 
     @overload
@@ -114,7 +113,8 @@ class CallDecorator(Generic[ToolT, FormattableT]):
 
         if is_context and is_async:
             tools = cast(
-                Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
+                Sequence[AsyncTool | AsyncContextTool[DepsT] | ProviderTool] | None,
+                self.tools,
             )
             prompt = AsyncContextPrompt(
                 fn=fn,
@@ -126,7 +126,9 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                 default_model=self.model,
             )
         elif is_context:
-            tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
+            tools = cast(
+                Sequence[Tool | ContextTool[DepsT] | ProviderTool] | None, self.tools
+            )
             prompt = ContextPrompt(
                 fn=fn,
                 toolkit=ContextToolkit(tools=tools),
@@ -137,7 +139,7 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                 default_model=self.model,
             )
         elif is_async:
-            tools = cast(Sequence[AsyncTool] | None, self.tools)
+            tools = cast(Sequence[AsyncTool | ProviderTool] | None, self.tools)
             prompt = AsyncPrompt(
                 fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
             )
@@ -146,7 +148,7 @@ class CallDecorator(Generic[ToolT, FormattableT]):
                 default_model=self.model,
             )
         else:
-            tools = cast(Sequence[Tool] | None, self.tools)
+            tools = cast(Sequence[Tool | ProviderTool] | None, self.tools)
             prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
             return Call(
                 prompt=prompt,
@@ -158,11 +160,8 @@ class CallDecorator(Generic[ToolT, FormattableT]):
 def call(
     model: ModelId,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    tools: Sequence[ToolT | ProviderTool] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
     """Decorator for converting prompt functions into LLM calls.
@@ -176,11 +175,8 @@ def call(
 def call(
     model: Model,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    tools: Sequence[ToolT | ProviderTool] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> CallDecorator[ToolT, FormattableT]:
     """Decorator for converting prompt functions into LLM calls.
 
@@ -192,11 +188,8 @@ def call(
 def call(
     model: ModelId | Model,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    tools: Sequence[ToolT | ProviderTool] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
     """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
mirascope/llm/formatting/__init__.py CHANGED
@@ -18,11 +18,11 @@ from .primitives import (
     create_wrapper_model,
     is_primitive_type,
 )
-from .types import FormattableT, FormattingMode
+from .types import FormatSpec, FormattableT, FormattingMode
 
 __all__ = [
     "Format",
-    "FormattableT",
+    "FormatSpec",
     "FormattableT",
     "FormattingMode",
     "FromCallArgs",
mirascope/llm/formatting/format.py CHANGED
@@ -11,7 +11,7 @@ from ..tools import FORMAT_TOOL_NAME, ToolFn, ToolParameterSchema, ToolSchema
 from ..types import NoneType
 from .output_parser import OutputParser, is_output_parser
 from .primitives import create_wrapper_model, is_primitive_type
-from .types import FormattableT, FormattingMode, HasFormattingInstructions
+from .types import FormatSpec, FormattableT, FormattingMode, HasFormattingInstructions
 
 TOOL_MODE_INSTRUCTIONS = f"""Always respond to the user's query using the {FORMAT_TOOL_NAME} tool for structured output."""
 
@@ -267,9 +267,7 @@ def format(
 
 
 def resolve_format(
-    formattable: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    ),
+    formattable: FormatSpec[FormattableT] | None,
     default_mode: FormattingMode,
 ) -> Format[FormattableT] | None:
     """Resolve a `Format` (or None) from a possible `Format`, Formattable, or `OutputParser`.
mirascope/llm/formatting/types.py CHANGED
@@ -1,15 +1,32 @@
 """Type for the formatting module."""
 
-from typing import Literal, Protocol, runtime_checkable
-from typing_extensions import TypeVar
+from typing import TYPE_CHECKING, Literal, Protocol, runtime_checkable
+from typing_extensions import TypeAliasType, TypeVar
 
 from pydantic import BaseModel
 
 from .primitives import PrimitiveType
 
+if TYPE_CHECKING:
+    from .format import Format
+    from .output_parser import OutputParser
+
 FormattableT = TypeVar(
     "FormattableT", bound=BaseModel | PrimitiveType | None, default=None
 )
+
+FormatSpec = TypeAliasType(
+    "FormatSpec",
+    "type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT]",
+    type_params=(FormattableT,),
+)
+"""Type alias for format parameter types.
+
+A FormatSpec can be:
+- A type (class) that represents the format schema (e.g., a Pydantic BaseModel)
+- A Format wrapper that includes mode and other metadata
+- An OutputParser for custom parsing logic
+"""
 """Type variable for structured response format types.
 
 This TypeVar represents the type of structured output format that LLM responses
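
The `FormatSpec` alias above uses `typing_extensions.TypeAliasType` with a string value so that `Format` and `OutputParser` only need to resolve under `TYPE_CHECKING`, presumably because `format.py` imports from `types.py` (see the hunk above) and an eager import would be circular. Below is a minimal standalone sketch of the same pattern, with illustrative names unrelated to mirascope.

```python
# Standalone illustration of the TypeAliasType pattern used by FormatSpec:
# a generic alias whose value is a string, so the referenced names only need
# to resolve for type checkers. All names here are illustrative.
from typing import TYPE_CHECKING
from typing_extensions import TypeAliasType, TypeVar

if TYPE_CHECKING:
    from collections.abc import Sequence  # resolved only during type checking

T = TypeVar("T")

MaybeMany = TypeAliasType("MaybeMany", "T | Sequence[T]", type_params=(T,))
"""Either a single value or a sequence of values."""

def first(value: "MaybeMany[int]") -> int:
    # The alias is purely a typing aid; runtime logic does not depend on it.
    return value[0] if isinstance(value, (list, tuple)) else value
```
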