mirascope 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. mirascope/_stubs.py +39 -18
  2. mirascope/_utils.py +34 -0
  3. mirascope/api/_generated/__init__.py +4 -0
  4. mirascope/api/_generated/organization_invitations/client.py +2 -2
  5. mirascope/api/_generated/organization_invitations/raw_client.py +2 -2
  6. mirascope/api/_generated/project_memberships/__init__.py +4 -0
  7. mirascope/api/_generated/project_memberships/client.py +91 -0
  8. mirascope/api/_generated/project_memberships/raw_client.py +239 -0
  9. mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
  10. mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
  11. mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
  12. mirascope/api/_generated/reference.md +73 -1
  13. mirascope/llm/__init__.py +19 -0
  14. mirascope/llm/calls/calls.py +28 -21
  15. mirascope/llm/calls/decorator.py +17 -24
  16. mirascope/llm/formatting/__init__.py +2 -2
  17. mirascope/llm/formatting/format.py +2 -4
  18. mirascope/llm/formatting/types.py +19 -2
  19. mirascope/llm/models/models.py +66 -146
  20. mirascope/llm/prompts/decorator.py +5 -16
  21. mirascope/llm/prompts/prompts.py +35 -38
  22. mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
  23. mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
  24. mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
  25. mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
  26. mirascope/llm/providers/anthropic/beta_provider.py +33 -69
  27. mirascope/llm/providers/anthropic/provider.py +52 -91
  28. mirascope/llm/providers/base/_utils.py +4 -9
  29. mirascope/llm/providers/base/base_provider.py +89 -205
  30. mirascope/llm/providers/google/_utils/decode.py +51 -1
  31. mirascope/llm/providers/google/_utils/encode.py +38 -21
  32. mirascope/llm/providers/google/provider.py +33 -69
  33. mirascope/llm/providers/mirascope/provider.py +25 -61
  34. mirascope/llm/providers/mlx/encoding/base.py +3 -6
  35. mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
  36. mirascope/llm/providers/mlx/mlx.py +9 -21
  37. mirascope/llm/providers/mlx/provider.py +33 -69
  38. mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
  39. mirascope/llm/providers/openai/completions/base_provider.py +34 -75
  40. mirascope/llm/providers/openai/provider.py +25 -61
  41. mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
  42. mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
  43. mirascope/llm/providers/openai/responses/provider.py +34 -75
  44. mirascope/llm/responses/__init__.py +2 -1
  45. mirascope/llm/responses/base_stream_response.py +4 -0
  46. mirascope/llm/responses/response.py +8 -12
  47. mirascope/llm/responses/stream_response.py +8 -12
  48. mirascope/llm/responses/usage.py +44 -0
  49. mirascope/llm/tools/__init__.py +24 -0
  50. mirascope/llm/tools/provider_tools.py +18 -0
  51. mirascope/llm/tools/tool_schema.py +11 -4
  52. mirascope/llm/tools/toolkit.py +24 -6
  53. mirascope/llm/tools/types.py +112 -0
  54. mirascope/llm/tools/web_search_tool.py +32 -0
  55. mirascope/ops/__init__.py +19 -1
  56. mirascope/ops/_internal/closure.py +4 -1
  57. mirascope/ops/_internal/exporters/exporters.py +13 -46
  58. mirascope/ops/_internal/exporters/utils.py +37 -0
  59. mirascope/ops/_internal/instrumentation/__init__.py +20 -0
  60. mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
  61. mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
  62. mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
  63. mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
  64. mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
  65. mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
  66. mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
  67. mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
  68. mirascope/ops/_internal/traced_calls.py +14 -0
  69. mirascope/ops/_internal/traced_functions.py +7 -2
  70. mirascope/ops/_internal/utils.py +12 -4
  71. mirascope/ops/_internal/versioned_functions.py +1 -1
  72. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
  73. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/RECORD +75 -64
  74. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
  75. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/prompts/prompts.py +35 -38

@@ -1,11 +1,12 @@
 """Concrete Prompt classes for generating messages with tools and formatting."""
 
-from collections.abc import Sequence
-from dataclasses import dataclass
-from typing import Generic, overload
+from collections.abc import Callable, Sequence
+from dataclasses import dataclass, field
+from typing import Any, Generic, TypeVar, overload
 
+from ..._utils import copy_function_metadata
 from ..context import Context, DepsT
-from ..formatting import Format, FormattableT, OutputParser
+from ..formatting import FormatSpec, FormattableT
 from ..messages import Message, promote_to_messages
 from ..models import Model
 from ..providers import ModelId
@@ -19,12 +20,7 @@ from ..responses import (
     Response,
     StreamResponse,
 )
-from ..tools import (
-    AsyncContextToolkit,
-    AsyncToolkit,
-    ContextToolkit,
-    Toolkit,
-)
+from ..tools import AsyncContextToolkit, AsyncToolkit, ContextToolkit, Toolkit
 from ..types import P
 from .protocols import (
     AsyncContextMessageTemplate,
@@ -33,9 +29,26 @@ from .protocols import (
     MessageTemplate,
 )
 
+FunctionT = TypeVar("FunctionT", bound=Callable[..., Any])
+
+
+@dataclass(kw_only=True)
+class BasePrompt(Generic[FunctionT]):
+    """Base class for all Prompt types with shared metadata functionality."""
+
+    fn: FunctionT
+    """The underlying prompt function that generates message content."""
+
+    __name__: str = field(init=False, repr=False, default="")
+    """The name of the underlying function (preserved for decorator stacking)."""
+
+    def __post_init__(self) -> None:
+        """Preserve standard function attributes for decorator stacking."""
+        copy_function_metadata(self, self.fn)
+
 
 @dataclass
-class Prompt(Generic[P, FormattableT]):
+class Prompt(BasePrompt[MessageTemplate[P]], Generic[P, FormattableT]):
     """A prompt that can be called with a model to generate a response.
 
     Created by decorating a `MessageTemplate` with `llm.prompt`. The decorated
@@ -45,15 +58,10 @@ class Prompt(Generic[P, FormattableT]):
     It can be invoked with a model: `prompt(model, *args, **kwargs)`.
     """
 
-    fn: MessageTemplate[P]
-    """The underlying prompt function that generates message content."""
-
     toolkit: Toolkit
     """The toolkit containing this prompt's tools."""
 
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
+    format: FormatSpec[FormattableT] | None
     """The response format for the generated response."""
 
     def messages(self, *args: P.args, **kwargs: P.kwargs) -> Sequence[Message]:
@@ -134,7 +142,7 @@ class Prompt(Generic[P, FormattableT]):
 
 
 @dataclass
-class AsyncPrompt(Generic[P, FormattableT]):
+class AsyncPrompt(BasePrompt[AsyncMessageTemplate[P]], Generic[P, FormattableT]):
     """An async prompt that can be called with a model to generate a response.
 
     Created by decorating an async `MessageTemplate` with `llm.prompt`. The decorated
@@ -144,15 +152,10 @@ class AsyncPrompt(Generic[P, FormattableT]):
     It can be invoked with a model: `await prompt(model, *args, **kwargs)`.
     """
 
-    fn: AsyncMessageTemplate[P]
-    """The underlying async prompt function that generates message content."""
-
     toolkit: AsyncToolkit
     """The toolkit containing this prompt's async tools."""
 
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
+    format: FormatSpec[FormattableT] | None
     """The response format for the generated response."""
 
     async def messages(self, *args: P.args, **kwargs: P.kwargs) -> Sequence[Message]:
@@ -235,7 +238,9 @@ class AsyncPrompt(Generic[P, FormattableT]):
 
 
 @dataclass
-class ContextPrompt(Generic[P, DepsT, FormattableT]):
+class ContextPrompt(
+    BasePrompt[ContextMessageTemplate[P, DepsT]], Generic[P, DepsT, FormattableT]
+):
     """A context-aware prompt that can be called with a model to generate a response.
 
     Created by decorating a `ContextMessageTemplate` with `llm.prompt`. The decorated
@@ -246,15 +251,10 @@ class ContextPrompt(Generic[P, DepsT, FormattableT]):
     It can be invoked with a model: `prompt(model, ctx, *args, **kwargs)`.
     """
 
-    fn: ContextMessageTemplate[P, DepsT]
-    """The underlying context-aware prompt function that generates message content."""
-
     toolkit: ContextToolkit[DepsT]
     """The toolkit containing this prompt's context-aware tools."""
 
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
+    format: FormatSpec[FormattableT] | None
    """The response format for the generated response."""
 
     def messages(
@@ -361,7 +361,9 @@ class ContextPrompt(Generic[P, DepsT, FormattableT]):
 
 
 @dataclass
-class AsyncContextPrompt(Generic[P, DepsT, FormattableT]):
+class AsyncContextPrompt(
+    BasePrompt[AsyncContextMessageTemplate[P, DepsT]], Generic[P, DepsT, FormattableT]
+):
     """An async context-aware prompt that can be called with a model to generate a response.
 
     Created by decorating an async `ContextMessageTemplate` with `llm.prompt`. The decorated
@@ -372,15 +374,10 @@ class AsyncContextPrompt(Generic[P, DepsT, FormattableT]):
     It can be invoked with a model: `await prompt(model, ctx, *args, **kwargs)`.
     """
 
-    fn: AsyncContextMessageTemplate[P, DepsT]
-    """The underlying async context-aware prompt function that generates message content."""
-
     toolkit: AsyncContextToolkit[DepsT]
     """The toolkit containing this prompt's async context-aware tools."""
 
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
+    format: FormatSpec[FormattableT] | None
     """The response format for the generated response."""
 
     async def messages(
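
Note: `BasePrompt.__post_init__` above delegates to `copy_function_metadata`, added in the new `mirascope/_utils.py` (+34 lines, not included in this diff), and declaring `__name__` as an `init=False` dataclass field presumably makes that attribute assignable and visible to type checkers. The helper itself likely mirrors `functools.update_wrapper`; the sketch below is an assumption about its behavior, not the actual implementation:

# Hypothetical sketch of copy_function_metadata; the real helper in
# mirascope/_utils.py is not shown in this diff.
from collections.abc import Callable
from typing import Any


def copy_function_metadata(target: Any, fn: Callable[..., Any]) -> None:
    """Copy standard function attributes from fn onto target."""
    for attr in ("__module__", "__name__", "__qualname__", "__doc__"):
        try:
            setattr(target, attr, getattr(fn, attr))
        except AttributeError:
            pass  # attribute missing on fn or not assignable on target
    target.__wrapped__ = fn  # lets inspect.unwrap recover the original function
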
mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7

@@ -44,7 +44,7 @@ from ....responses import (
     UsageDeltaChunk,
 )
 from ..model_id import model_name
-from .decode import decode_usage
+from .decode import decode_usage, extract_tool_usage
 
 BETA_FINISH_REASON_MAP = {
     "max_tokens": FinishReason.MAX_TOKENS,
@@ -53,7 +53,9 @@ BETA_FINISH_REASON_MAP = {
 }
 
 
-def _decode_beta_assistant_content(content: BetaContentBlock) -> AssistantContentPart:
+def _decode_beta_assistant_content(
+    content: BetaContentBlock,
+) -> AssistantContentPart | None:
     """Convert Beta content block to mirascope AssistantContentPart."""
     if content.type == "text":
         return Text(text=content.text)
@@ -65,6 +67,8 @@ def _decode_beta_assistant_content(content: BetaContentBlock) -> AssistantConten
         )
     elif content.type == "thinking":
         return Thought(thought=content.thinking)
+    elif content.type in ("server_tool_use", "web_search_tool_result"):
+        return None  # Skip server-side tool content, preserved in raw_message
     else:
         raise NotImplementedError(
             f"Support for beta content type `{content.type}` is not yet implemented."
@@ -78,7 +82,13 @@ def beta_decode_response(
     include_thoughts: bool,
 ) -> tuple[AssistantMessage, FinishReason | None, Usage]:
     """Convert Beta message to mirascope AssistantMessage and usage."""
-    content = [_decode_beta_assistant_content(part) for part in response.content]
+    content = [
+        part
+        for part in (
+            _decode_beta_assistant_content(block) for block in response.content
+        )
+        if part is not None
+    ]
     if not include_thoughts:
         content = [part for part in content if part.type != "thought"]
     assistant_message = AssistantMessage(
@@ -157,6 +167,8 @@ class _BetaChunkProcessor:
                     "type": "redacted_thinking",
                     "data": content_block.data,
                 }
+            elif content_block.type in ("server_tool_use", "web_search_tool_result"):
+                pass  # Skip server-side tool content
             else:
                 raise NotImplementedError(
                     f"Support for beta content block type `{content_block.type}` "
@@ -164,8 +176,8 @@ class _BetaChunkProcessor:
                 )
 
         elif event.type == "content_block_delta":
-            if self.current_block_param is None:  # pragma: no cover
-                raise RuntimeError("Received delta without a current block")
+            if self.current_block_param is None:
+                return  # Skip deltas for server-side tool content
 
             delta = event.delta
             if delta.type == "text_delta":
@@ -198,14 +210,16 @@ class _BetaChunkProcessor:
                     f"Received signature_delta for {self.current_block_param['type']} block"
                 )
                 self.current_block_param["signature"] += delta.signature
+            elif delta.type == "citations_delta":
+                pass  # Skip citations delta, preserved in raw_message
             else:
                 raise RuntimeError(
                     f"Received unsupported delta type: {delta.type}"
                 )  # pragma: no cover
 
         elif event.type == "content_block_stop":
-            if self.current_block_param is None:  # pragma: no cover
-                raise RuntimeError("Received stop without a current block")
+            if self.current_block_param is None:
+                return  # Skip stop for server-side tool content
 
             block_type = self.current_block_param["type"]
 
@@ -245,6 +259,7 @@ class _BetaChunkProcessor:
             cache_read_tokens=usage.cache_read_input_tokens or 0,
             cache_write_tokens=usage.cache_creation_input_tokens or 0,
             reasoning_tokens=0,
+            provider_tool_usage=extract_tool_usage(usage),
         )
 
     def raw_message_chunk(self) -> RawMessageChunk:
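
Note: with the skip logic above, a `server_tool_use` or `web_search_tool_result` block never opens a `current_block_param`, so the delta and stop branches now return early where they previously raised `RuntimeError`. A self-contained sketch of that state machine, using stand-in events rather than mirascope's or anthropic's types:

# Stand-in stream events: (kind, payload). The real code processes anthropic
# stream events via _BetaChunkProcessor / _AnthropicChunkProcessor.
events = [
    ("start", "server_tool_use"),  # skipped: no current block is opened
    ("delta", "ignored"),          # early return; previously a RuntimeError
    ("stop", None),                # early return as well
    ("start", "text"),
    ("delta", "Hello"),
    ("stop", None),
]

current: dict | None = None
out: list[str] = []
for kind, payload in events:
    if kind == "start":
        # Only client-visible block types open a current block.
        current = {"type": "text", "text": ""} if payload == "text" else None
    elif kind == "delta":
        if current is None:
            continue  # skip deltas for server-side tool content
        current["text"] += payload
    elif kind == "stop":
        if current is None:
            continue  # skip stop for server-side tool content
        out.append(current["text"])
        current = None

assert out == ["Hello"]
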
mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16

@@ -7,12 +7,15 @@ from typing_extensions import Required
 from anthropic import Omit
 from anthropic.types.anthropic_beta_param import AnthropicBetaParam
 from anthropic.types.beta import (
+    BetaCacheControlEphemeralParam,
     BetaContentBlockParam,
     BetaMessageParam,
     BetaTextBlockParam,
     BetaThinkingConfigParam,
     BetaToolChoiceParam,
     BetaToolParam,
+    BetaToolUnionParam,
+    BetaWebSearchTool20250305Param,
 )
 from pydantic import BaseModel
 
@@ -20,12 +23,12 @@ from ....content import ContentPart
 from ....exceptions import FeatureNotSupportedError
 from ....formatting import (
     Format,
+    FormatSpec,
     FormattableT,
-    OutputParser,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
-from ....tools import AnyToolSchema, BaseToolkit
+from ....tools import AnyToolSchema, BaseToolkit, ProviderTool, WebSearchTool
 from ...base import _utils as _base_utils
 from ..model_id import model_name
 from ..model_info import MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
@@ -49,7 +52,7 @@ class BetaParseKwargs(TypedDict, total=False):
     max_tokens: Required[int]
     messages: Sequence[BetaMessageParam]
     system: Sequence[BetaTextBlockParam] | Omit
-    tools: Sequence[BetaToolParam] | Omit
+    tools: Sequence[BetaToolUnionParam] | Omit
     tool_choice: BetaToolChoiceParam | Omit
     temperature: float | Omit
     top_p: float | Omit
@@ -135,8 +138,8 @@ def _beta_encode_messages(
 
 
 def _beta_convert_tool_to_tool_param(
-    tool: AnyToolSchema, model_supports_strict: bool
-) -> BetaToolParam:
+    tool: "AnyToolSchema | ProviderTool", model_supports_strict: bool
+) -> BetaToolUnionParam:
     """Convert a single Mirascope tool to Beta Anthropic tool format.
 
     If the tool has strict=True (or None, and the model supports strict), the schema
@@ -144,6 +147,14 @@ def _beta_convert_tool_to_tool_param(
     by adding additionalProperties: false to all object schemas, and strict=True
     is passed to the API.
     """
+    if isinstance(tool, WebSearchTool):
+        return BetaWebSearchTool20250305Param(
+            type="web_search_20250305", name="web_search"
+        )
+    if isinstance(tool, ProviderTool):
+        raise FeatureNotSupportedError(
+            f"Provider tool {tool.name}", provider_id="anthropic-beta"
+        )
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
 
@@ -163,11 +174,8 @@ def beta_encode_request(
     *,
     model_id: str,
     messages: Sequence[Message],
-    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
+    tools: BaseToolkit[AnyToolSchema],
+    format: FormatSpec[FormattableT] | None,
     params: "Params",
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, BetaParseKwargs]:
     """Prepares a request for the Anthropic beta.messages.parse method."""
@@ -185,13 +193,11 @@ def beta_encode_request(
         }
     )
 
-    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
-
     model_supports_strict = (
         model_name(model_id) not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
     )
     # Check for strict tools on models that don't support them
-    if _base_utils.has_strict_tools(tools) and not model_supports_strict:
+    if _base_utils.has_strict_tools(tools.tools) and not model_supports_strict:
         raise FeatureNotSupportedError(
             feature="strict tools",
             provider_id="anthropic",
@@ -204,7 +210,7 @@ def beta_encode_request(
         _beta_convert_tool_to_tool_param(
             tool, model_supports_strict=model_supports_strict
         )
-        for tool in tools
+        for tool in tools.tools
     ]
     format = resolve_format(format, default_mode=DEFAULT_FORMAT_MODE)
 
@@ -226,7 +232,7 @@ def beta_encode_request(
                 format_tool_schema, model_supports_strict=model_supports_strict
            )
        )
        if tools.tools:
            kwargs["tool_choice"] = {"type": "any"}
        else:
            kwargs["tool_choice"] = {
@@ -243,7 +249,7 @@ def beta_encode_request(
     if anthropic_tools:
         # Add cache control to the last tool for prompt caching
         last_tool = anthropic_tools[-1]
-        last_tool["cache_control"] = {"type": "ephemeral"}
+        last_tool["cache_control"] = BetaCacheControlEphemeralParam(type="ephemeral")
         kwargs["tools"] = anthropic_tools
 
     system_message_content, remaining_messages = _base_utils.extract_system_message(
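
Note: both encode paths now special-case provider tools before building a schema-based tool param. For the dispatch above to work, `WebSearchTool` would need to be a subclass of `ProviderTool` (both are exported from `mirascope/llm/tools` per the file list), with the most specific check first. A stand-alone sketch of that pattern under that assumption; the classes here are stand-ins, not mirascope's:

from dataclasses import dataclass


@dataclass
class ProviderTool:  # stand-in for mirascope's ProviderTool
    name: str


@dataclass
class WebSearchTool(ProviderTool):  # stand-in; assumed subclass relationship
    name: str = "web_search"


def convert(tool: ProviderTool) -> dict:
    # Most specific check first: a WebSearchTool *is* a ProviderTool, so
    # swapping these branches would wrongly reject web search as unsupported.
    if isinstance(tool, WebSearchTool):
        return {"type": "web_search_20250305", "name": "web_search"}
    raise NotImplementedError(f"Provider tool {tool.name} is not supported here")


assert convert(WebSearchTool())["type"] == "web_search_20250305"
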
mirascope/llm/providers/anthropic/_utils/decode.py +45 -7

@@ -5,7 +5,7 @@ from typing import Any, TypeAlias, cast
 
 from anthropic import types as anthropic_types
 from anthropic.lib.streaming import AsyncMessageStreamManager, MessageStreamManager
-from anthropic.types.beta import BetaUsage
+from anthropic.types.beta import BetaMessageDeltaUsage, BetaUsage
 
 from ....content import (
     AssistantContentPart,
@@ -28,6 +28,7 @@ from ....responses import (
     ChunkIterator,
     FinishReason,
     FinishReasonChunk,
+    ProviderToolUsage,
     RawMessageChunk,
     RawStreamEventChunk,
     Usage,
@@ -43,7 +44,7 @@ ANTHROPIC_FINISH_REASON_MAP = {
 
 def _decode_assistant_content(
     content: anthropic_types.ContentBlock,
-) -> AssistantContentPart:
+) -> AssistantContentPart | None:
     """Convert Anthropic content block to mirascope AssistantContentPart."""
     if content.type == "text":
         return Text(text=content.text)
@@ -55,12 +56,39 @@ def _decode_assistant_content(
         )
     elif content.type == "thinking":
         return Thought(thought=content.thinking)
+    elif content.type in ("server_tool_use", "web_search_tool_result"):
+        return None  # Skip server-side tool content, preserved in raw_message
     else:
         raise NotImplementedError(
             f"Support for content type `{content.type}` is not yet implemented."
         )
 
 
+def extract_tool_usage(
+    usage: (
+        anthropic_types.Usage
+        | anthropic_types.MessageDeltaUsage
+        | BetaUsage
+        | BetaMessageDeltaUsage
+    ),
+) -> list[ProviderToolUsage] | None:
+    """Extract provider tool usage from Anthropic usage object."""
+    server_tool_use = getattr(usage, "server_tool_use", None)
+    if server_tool_use is None:
+        return None
+
+    tools: list[ProviderToolUsage] = []
+
+    # Web search
+    web_search_requests = getattr(server_tool_use, "web_search_requests", None)
+    if web_search_requests and web_search_requests > 0:
+        tools.append(
+            ProviderToolUsage(name="web_search", call_count=web_search_requests)
+        )
+
+    return tools if tools else None
+
+
 def decode_usage(
     usage: anthropic_types.Usage | BetaUsage,
 ) -> Usage:
@@ -76,6 +104,7 @@ def decode_usage(
         cache_read_tokens=cache_read_tokens,
         cache_write_tokens=cache_write_tokens,
         reasoning_tokens=0,
+        provider_tool_usage=extract_tool_usage(usage),
         raw=usage,
     )
 
@@ -87,7 +116,11 @@ def decode_response(
     include_thoughts: bool,
 ) -> tuple[AssistantMessage, FinishReason | None, Usage]:
     """Convert Anthropic message to mirascope AssistantMessage and usage."""
-    content = [_decode_assistant_content(part) for part in response.content]
+    content = [
+        part
+        for part in (_decode_assistant_content(block) for block in response.content)
+        if part is not None
+    ]
     if not include_thoughts:
         content = [part for part in content if part.type != "thought"]
     assistant_message = AssistantMessage(
@@ -166,12 +199,14 @@ class _AnthropicChunkProcessor:
                     "type": "redacted_thinking",
                     "data": content_block.data,
                 }
+            elif content_block.type in ("server_tool_use", "web_search_tool_result"):
+                pass  # Skip server-side tool content
             else:
                 raise NotImplementedError
 
         elif event.type == "content_block_delta":
-            if self.current_block_param is None:  # pragma: no cover
-                raise RuntimeError("Received delta without a current block")
+            if self.current_block_param is None:
+                return  # Skip deltas for server-side tool content
 
             delta = event.delta
             if delta.type == "text_delta":
@@ -204,14 +239,16 @@ class _AnthropicChunkProcessor:
                     f"Received signature_delta for {self.current_block_param['type']} block"
                 )
                 self.current_block_param["signature"] += delta.signature
+            elif delta.type == "citations_delta":
+                pass  # Skip citations delta, preserved in raw_message
             else:
                 raise RuntimeError(
                     f"Received unsupported delta type: {delta.type}"
                 )  # pragma: no cover
 
         elif event.type == "content_block_stop":
-            if self.current_block_param is None:  # pragma: no cover
-                raise RuntimeError("Received stop without a current block")
+            if self.current_block_param is None:
+                return  # Skip stop for server-side tool content
 
             block_type = self.current_block_param["type"]
 
@@ -251,6 +288,7 @@ class _AnthropicChunkProcessor:
             cache_read_tokens=usage.cache_read_input_tokens or 0,
             cache_write_tokens=usage.cache_creation_input_tokens or 0,
             reasoning_tokens=0,
+            provider_tool_usage=extract_tool_usage(usage),
         )
 
     def raw_message_chunk(self) -> RawMessageChunk:
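
Note: `extract_tool_usage` feeds the new `provider_tool_usage` field on `Usage` (see `mirascope/llm/responses/usage.py` in the file list), on both the streaming and non-streaming paths. A hedged sketch of how a caller might read it; the field names come from this diff, but the accessor pattern below is an illustration, not documented API:

# Hypothetical consumer of Usage.provider_tool_usage
# (list[ProviderToolUsage] | None per the diff above).
def summarize_provider_tool_usage(usage) -> list[str]:
    if usage is None or not usage.provider_tool_usage:
        return []
    return [
        f"{tool.name}: {tool.call_count} call(s)"  # e.g. "web_search: 2 call(s)"
        for tool in usage.provider_tool_usage
    ]
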
mirascope/llm/providers/anthropic/_utils/encode.py +28 -15

@@ -14,12 +14,18 @@ from ....content import ContentPart, ImageMimeType
 from ....exceptions import FeatureNotSupportedError
 from ....formatting import (
     Format,
+    FormatSpec,
     FormattableT,
-    OutputParser,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
-from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
+from ....tools import (
+    FORMAT_TOOL_NAME,
+    AnyToolSchema,
+    BaseToolkit,
+    ProviderTool,
+    WebSearchTool,
+)
 from ...base import _utils as _base_utils
 from ..model_id import AnthropicModelId, model_name
 
@@ -138,7 +144,7 @@ class MessageCreateKwargs(TypedDict, total=False):
     max_tokens: Required[int]
     messages: Sequence[anthropic_types.MessageParam]
     system: Sequence[anthropic_types.TextBlockParam] | Omit
-    tools: Sequence[anthropic_types.ToolParam] | Omit
+    tools: Sequence[anthropic_types.ToolUnionParam] | Omit
     tool_choice: anthropic_types.ToolChoiceParam | Omit
     temperature: float | Omit
     top_p: float | Omit
@@ -323,8 +329,18 @@ def _encode_messages(
 
 
 @lru_cache(maxsize=128)
-def convert_tool_to_tool_param(tool: AnyToolSchema) -> anthropic_types.ToolParam:
+def convert_tool_to_tool_param(
+    tool: AnyToolSchema | ProviderTool,
+) -> anthropic_types.ToolUnionParam:
     """Convert a single Mirascope tool to Anthropic tool format with caching."""
+    if isinstance(tool, WebSearchTool):
+        return anthropic_types.WebSearchTool20250305Param(
+            type="web_search_20250305", name="web_search"
+        )
+    if isinstance(tool, ProviderTool):
+        raise FeatureNotSupportedError(
+            f"Provider tool {tool.name}", provider_id="anthropic"
+        )
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
     return anthropic_types.ToolParam(
@@ -338,11 +354,8 @@ def encode_request(
     *,
     model_id: AnthropicModelId,
     messages: Sequence[Message],
-    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
+    tools: BaseToolkit[AnyToolSchema],
+    format: FormatSpec[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, MessageCreateKwargs]:
     """Prepares a request for the Anthropic messages.create method."""
@@ -355,10 +368,8 @@ def encode_request(
         {"model": model_name(model_id), "max_tokens": max_tokens, **processed}
     )
 
-    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
-
     # Check for strict tools - the non-beta API doesn't support them
-    if _base_utils.has_strict_tools(tools):
+    if _base_utils.has_strict_tools(tools.tools):
         raise FeatureNotSupportedError(
             feature="strict tools",
             provider_id="anthropic",
@@ -366,7 +377,7 @@ def encode_request(
         message="Anthropic provider does not support strict tools. Try the beta provider.",
     )
 
-    anthropic_tools = [convert_tool_to_tool_param(tool) for tool in tools]
+    anthropic_tools = [convert_tool_to_tool_param(tool) for tool in tools.tools]
     format = resolve_format(format, default_mode=DEFAULT_FORMAT_MODE)
     if format is not None:
         if format.mode == "strict":
@@ -378,7 +389,7 @@ def encode_request(
         if format.mode == "tool":
             format_tool_schema = format.create_tool_schema()
             anthropic_tools.append(convert_tool_to_tool_param(format_tool_schema))
-            if tools:
+            if tools.tools:
                 kwargs["tool_choice"] = {"type": "any"}
             else:
                 kwargs["tool_choice"] = {
@@ -395,7 +406,9 @@ def encode_request(
     if anthropic_tools:
         # Add cache control to the last tool for prompt caching
         last_tool = anthropic_tools[-1]
-        last_tool["cache_control"] = {"type": "ephemeral"}
+        last_tool["cache_control"] = anthropic_types.CacheControlEphemeralParam(
+            type="ephemeral"
+        )
         kwargs["tools"] = anthropic_tools
 
     system_message_content, remaining_messages = _base_utils.extract_system_message(
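
Note: the cache-control change at the end is purely a typing improvement. The anthropic SDK's `*Param` types are `TypedDict`s, so calling the constructor produces the same runtime dict as the old literal while letting a type checker validate the keys and values. Illustrated with a stand-in `TypedDict` (not the anthropic import):

from typing import Literal, TypedDict


class CacheControlEphemeralParam(TypedDict):  # stand-in for anthropic's TypedDict
    type: Literal["ephemeral"]


# TypedDict "constructors" build plain dicts, so both spellings are
# runtime-identical; the typed call just adds static validation.
assert CacheControlEphemeralParam(type="ephemeral") == {"type": "ephemeral"}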