mirascope 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. mirascope/_stubs.py +39 -18
  2. mirascope/_utils.py +34 -0
  3. mirascope/api/_generated/__init__.py +4 -0
  4. mirascope/api/_generated/organization_invitations/client.py +2 -2
  5. mirascope/api/_generated/organization_invitations/raw_client.py +2 -2
  6. mirascope/api/_generated/project_memberships/__init__.py +4 -0
  7. mirascope/api/_generated/project_memberships/client.py +91 -0
  8. mirascope/api/_generated/project_memberships/raw_client.py +239 -0
  9. mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
  10. mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
  11. mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
  12. mirascope/api/_generated/reference.md +73 -1
  13. mirascope/llm/__init__.py +19 -0
  14. mirascope/llm/calls/calls.py +28 -21
  15. mirascope/llm/calls/decorator.py +17 -24
  16. mirascope/llm/formatting/__init__.py +2 -2
  17. mirascope/llm/formatting/format.py +2 -4
  18. mirascope/llm/formatting/types.py +19 -2
  19. mirascope/llm/models/models.py +66 -146
  20. mirascope/llm/prompts/decorator.py +5 -16
  21. mirascope/llm/prompts/prompts.py +35 -38
  22. mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
  23. mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
  24. mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
  25. mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
  26. mirascope/llm/providers/anthropic/beta_provider.py +33 -69
  27. mirascope/llm/providers/anthropic/provider.py +52 -91
  28. mirascope/llm/providers/base/_utils.py +4 -9
  29. mirascope/llm/providers/base/base_provider.py +89 -205
  30. mirascope/llm/providers/google/_utils/decode.py +51 -1
  31. mirascope/llm/providers/google/_utils/encode.py +38 -21
  32. mirascope/llm/providers/google/provider.py +33 -69
  33. mirascope/llm/providers/mirascope/provider.py +25 -61
  34. mirascope/llm/providers/mlx/encoding/base.py +3 -6
  35. mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
  36. mirascope/llm/providers/mlx/mlx.py +9 -21
  37. mirascope/llm/providers/mlx/provider.py +33 -69
  38. mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
  39. mirascope/llm/providers/openai/completions/base_provider.py +34 -75
  40. mirascope/llm/providers/openai/provider.py +25 -61
  41. mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
  42. mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
  43. mirascope/llm/providers/openai/responses/provider.py +34 -75
  44. mirascope/llm/responses/__init__.py +2 -1
  45. mirascope/llm/responses/base_stream_response.py +4 -0
  46. mirascope/llm/responses/response.py +8 -12
  47. mirascope/llm/responses/stream_response.py +8 -12
  48. mirascope/llm/responses/usage.py +44 -0
  49. mirascope/llm/tools/__init__.py +24 -0
  50. mirascope/llm/tools/provider_tools.py +18 -0
  51. mirascope/llm/tools/tool_schema.py +11 -4
  52. mirascope/llm/tools/toolkit.py +24 -6
  53. mirascope/llm/tools/types.py +112 -0
  54. mirascope/llm/tools/web_search_tool.py +32 -0
  55. mirascope/ops/__init__.py +19 -1
  56. mirascope/ops/_internal/closure.py +4 -1
  57. mirascope/ops/_internal/exporters/exporters.py +13 -46
  58. mirascope/ops/_internal/exporters/utils.py +37 -0
  59. mirascope/ops/_internal/instrumentation/__init__.py +20 -0
  60. mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
  61. mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
  62. mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
  63. mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
  64. mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
  65. mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
  66. mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
  67. mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
  68. mirascope/ops/_internal/traced_calls.py +14 -0
  69. mirascope/ops/_internal/traced_functions.py +7 -2
  70. mirascope/ops/_internal/utils.py +12 -4
  71. mirascope/ops/_internal/versioned_functions.py +1 -1
  72. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
  73. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/RECORD +75 -64
  74. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
  75. {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/google/_utils/decode.py

@@ -27,6 +27,7 @@ from ....responses import (
     ChunkIterator,
     FinishReason,
     FinishReasonChunk,
+    ProviderToolUsage,
     RawMessageChunk,
     RawStreamEventChunk,
     Usage,
@@ -45,8 +46,32 @@ GOOGLE_FINISH_REASON_MAP = {
 }
 
 
+def _extract_tool_usage(
+    candidate: genai_types.Candidate | None,
+) -> list[ProviderToolUsage] | None:
+    """Extract provider tool usage from Google candidate's grounding metadata."""
+    if candidate is None:  # pragma: no cover
+        return None
+
+    grounding_metadata = candidate.grounding_metadata
+    if grounding_metadata is None:
+        return None
+
+    tools: list[ProviderToolUsage] = []
+
+    # Web search queries indicate grounding was used
+    web_search_queries = grounding_metadata.web_search_queries
+    if web_search_queries and len(web_search_queries) > 0:
+        tools.append(
+            ProviderToolUsage(name="web_search", call_count=len(web_search_queries))
+        )
+
+    return tools if tools else None
+
+
 def _decode_usage(
     usage: genai_types.GenerateContentResponseUsageMetadata | None,
+    candidate: genai_types.Candidate | None = None,
 ) -> Usage | None:
     """Convert Google UsageMetadata to Mirascope Usage."""
     if (
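For illustration, a minimal sketch (not part of the diff) of how the new _extract_tool_usage helper behaves, assuming google-genai's pydantic types accept these keyword arguments:

    from google.genai import types as genai_types

    # A candidate whose grounding metadata records one web search query:
    candidate = genai_types.Candidate(
        grounding_metadata=genai_types.GroundingMetadata(
            web_search_queries=["mirascope 2.1.0 release notes"]
        )
    )
    print(_extract_tool_usage(candidate))
    # -> [ProviderToolUsage(name="web_search", call_count=1)]

    # No grounding metadata means no provider tool usage:
    print(_extract_tool_usage(genai_types.Candidate()))  # -> None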
@@ -65,6 +90,7 @@ def _decode_usage(
         cache_read_tokens=usage.cached_content_token_count or 0,
         cache_write_tokens=0,
         reasoning_tokens=usage.thoughts_token_count or 0,
+        provider_tool_usage=_extract_tool_usage(candidate),
         raw=usage,
     )
 
@@ -152,7 +178,8 @@ def decode_response(
         raw_message=candidate_content.model_dump(),
     )
 
-    usage = _decode_usage(response.usage_metadata)
+    candidate = response.candidates[0] if response.candidates else None
+    usage = _decode_usage(response.usage_metadata, candidate)
     return assistant_message, finish_reason, usage
 
 
@@ -166,6 +193,8 @@ class _GoogleChunkProcessor:
         # Track previous cumulative usage to compute deltas
         self.prev_usage = Usage()
         self.include_thoughts = include_thoughts
+        # Track web search queries count from grounding metadata
+        self.web_search_query_count = 0
 
     def process_chunk(
         self, chunk: genai_types.GenerateContentResponse
@@ -239,6 +268,15 @@ class _GoogleChunkProcessor:
                 yield ToolCallEndChunk(id=tool_id)
                 self.current_content_type = None
 
+        # Track web search queries from grounding metadata
+        if (
+            candidate.grounding_metadata
+            and candidate.grounding_metadata.web_search_queries
+        ):
+            self.web_search_query_count = len(
+                candidate.grounding_metadata.web_search_queries
+            )
+
         if candidate.finish_reason:
             if self.current_content_type == "text":
                 yield TextEndChunk()
@@ -262,6 +300,17 @@ class _GoogleChunkProcessor:
             current_cache_read = usage_metadata.cached_content_token_count or 0
             current_reasoning = usage_metadata.thoughts_token_count or 0
 
+            # Include provider_tool_usage on the final usage chunk (when finish_reason is present)
+            provider_tool_usage = (
+                [
+                    ProviderToolUsage(
+                        name="web_search", call_count=self.web_search_query_count
+                    )
+                ]
+                if candidate.finish_reason and self.web_search_query_count > 0
+                else None
+            )
+
             yield UsageDeltaChunk(
                 input_tokens=current_input - self.prev_usage.input_tokens,
                 output_tokens=current_output - self.prev_usage.output_tokens,
@@ -269,6 +318,7 @@
                 - self.prev_usage.cache_read_tokens,
                 cache_write_tokens=0,
                 reasoning_tokens=current_reasoning - self.prev_usage.reasoning_tokens,
+                provider_tool_usage=provider_tool_usage,
            )
 
         # Update previous usage
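Google reports cumulative usage counts on every chunk, so the processor above emits per-chunk deltas and attaches provider_tool_usage only once, on the chunk carrying finish_reason. A hedged sketch of that arithmetic with illustrative values (ProviderToolUsage is the class imported at the top of this module):

    # Cumulative input-token counts from two successive chunks:
    prev_input, current_input = 120, 150
    delta_input = current_input - prev_input  # 30 tokens attributed to this chunk

    # provider_tool_usage is attached only when finish_reason is present,
    # using the last observed web_search_queries length:
    web_search_query_count = 2
    finish_reason_present = True
    provider_tool_usage = (
        [ProviderToolUsage(name="web_search", call_count=web_search_query_count)]
        if finish_reason_present and web_search_query_count > 0
        else None
    )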
mirascope/llm/providers/google/_utils/encode.py

@@ -15,12 +15,18 @@ from ....content import ContentPart
 from ....exceptions import FeatureNotSupportedError
 from ....formatting import (
     Format,
+    FormatSpec,
     FormattableT,
-    OutputParser,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
-from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
+from ....tools import (
+    FORMAT_TOOL_NAME,
+    AnyToolSchema,
+    BaseToolkit,
+    ProviderTool,
+    WebSearchTool,
+)
 from ...base import _utils as _base_utils
 from ..model_id import GoogleModelId, model_name
 from ..model_info import MODELS_WITHOUT_STRUCTURED_OUTPUT_AND_TOOLS_SUPPORT
@@ -244,9 +250,13 @@ def _encode_messages(
 
 @lru_cache(maxsize=128)
 def _convert_tool_to_function_declaration(
-    tool: AnyToolSchema,
+    tool: AnyToolSchema | ProviderTool,
 ) -> genai_types.FunctionDeclarationDict:
     """Convert a single Mirascope tool to Google FunctionDeclaration format with caching."""
+    if isinstance(tool, ProviderTool):
+        raise FeatureNotSupportedError(
+            f"Provider tool {tool.name}", provider_id="google"
+        )
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
 
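A hedged sketch of the new guard, assuming WebSearchTool() takes no required arguments and subclasses ProviderTool: passed directly to the converter (that is, without the filtering added to encode_request below), a provider tool is rejected rather than silently encoded as a function declaration:

    try:
        # Assumption: WebSearchTool is a ProviderTool with a no-arg constructor.
        _convert_tool_to_function_declaration(WebSearchTool())
    except FeatureNotSupportedError as error:
        print(error)  # names the unsupported provider tool for provider "google"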
@@ -270,11 +280,8 @@ def encode_request(
     *,
     model_id: GoogleModelId,
     messages: Sequence[Message],
-    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
+    tools: BaseToolkit[AnyToolSchema],
+    format: FormatSpec[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, GoogleKwargs]:
     """Prepares a request for the genai `Client.models.generate_content` method."""
@@ -314,9 +321,7 @@ def encode_request(
     if thinking_config.get("encode_thoughts_as_text"):
         encode_thoughts_as_text = True
 
-    tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
-
-    if _base_utils.has_strict_tools(tools):
+    if _base_utils.has_strict_tools(tools.tools):
         raise FeatureNotSupportedError(
             feature="strict tools",
             provider_id="google",
@@ -332,12 +337,14 @@
     )
     # Older google models do not allow strict mode when using tools; if so, we use tool
     # mode when tools are present by default for compatibility. Otherwise, prefer strict mode.
-    default_mode = "tool" if tools and not allows_strict_mode_with_tools else "strict"
+    default_mode = (
+        "tool" if tools.tools and not allows_strict_mode_with_tools else "strict"
+    )
     format = resolve_format(format, default_mode=default_mode)
     if format is not None:
         if (
             format.mode in ("strict", "json")
-            and tools
+            and tools.tools
             and not allows_strict_mode_with_tools
         ):
             raise FeatureNotSupportedError(
@@ -358,7 +365,7 @@
         function_calling_config = genai_types.FunctionCallingConfigDict(
             mode=genai_types.FunctionCallingConfigMode.ANY
         )
-        if not tools:
+        if not tools.tools:
             function_calling_config["allowed_function_names"] = [FORMAT_TOOL_NAME]
 
         google_config["tool_config"] = genai_types.ToolConfigDict(
@@ -372,13 +379,23 @@
             messages, format.formatting_instructions
         )
 
-    if tools:
-        function_declarations = [
-            _convert_tool_to_function_declaration(tool) for tool in tools
-        ]
-        google_tools.append(
-            genai_types.ToolDict(function_declarations=function_declarations)
-        )
+    if tools.tools:
+        # Separate web search tools from function tools
+        function_tools = [t for t in tools.tools if not isinstance(t, WebSearchTool)]
+        has_web_search = any(isinstance(t, WebSearchTool) for t in tools.tools)
+
+        if function_tools:
+            function_declarations = [
+                _convert_tool_to_function_declaration(tool) for tool in function_tools
+            ]
+            google_tools.append(
+                genai_types.ToolDict(function_declarations=function_declarations)
+            )
+
+        if has_web_search:
+            google_tools.append(
+                genai_types.ToolDict(google_search=genai_types.GoogleSearchDict())
+            )
 
     if google_tools:
         google_config["tools"] = cast(genai_types.ToolListUnionDict, google_tools)
mirascope/llm/providers/google/provider.py

@@ -10,7 +10,7 @@ from google.genai import Client
 from google.genai.types import HttpOptions
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT, OutputParser
+from ...formatting import FormatSpec, FormattableT
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -23,13 +23,9 @@ from ...responses import (
     StreamResponse,
 )
 from ...tools import (
-    AsyncContextTool,
     AsyncContextToolkit,
-    AsyncTool,
     AsyncToolkit,
-    ContextTool,
     ContextToolkit,
-    Tool,
     Toolkit,
 )
 from ..base import BaseProvider
@@ -69,11 +65,8 @@ class GoogleProvider(BaseProvider[Client]):
         *,
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the Google GenAI API.
@@ -91,7 +84,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -108,7 +101,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -122,13 +115,8 @@
         ctx: Context[DepsT],
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the Google GenAI API.
@@ -147,7 +135,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -164,7 +152,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -177,11 +165,8 @@
         *,
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the Google GenAI API.
@@ -199,7 +184,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -216,7 +201,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -230,13 +215,8 @@
         ctx: Context[DepsT],
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Google GenAI API.
@@ -255,7 +235,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -272,7 +252,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             assistant_message=assistant_message,
             finish_reason=finish_reason,
@@ -285,11 +265,8 @@
         *,
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: Toolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the Google GenAI API.
@@ -307,7 +284,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -324,7 +301,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -336,13 +313,8 @@
         ctx: Context[DepsT],
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: ContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Google GenAI API.
@@ -361,7 +333,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -378,7 +350,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -389,11 +361,8 @@
         *,
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncToolkit,
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Google GenAI API.
@@ -411,7 +380,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -428,7 +397,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
@@ -440,13 +409,8 @@
         ctx: Context[DepsT],
         model_id: GoogleModelId,
         messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT]
-        | Format[FormattableT]
-        | OutputParser[FormattableT]
-        | None = None,
+        toolkit: AsyncContextToolkit[DepsT],
+        format: FormatSpec[FormattableT] | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -468,7 +432,7 @@
         input_messages, format, kwargs = _utils.encode_request(
             model_id=model_id,
             messages=messages,
-            tools=tools,
+            tools=toolkit,
             format=format,
             params=params,
         )
@@ -485,7 +449,7 @@
             model_id=model_id,
             provider_model_name=model_name(model_id),
             params=params,
-            tools=tools,
+            tools=toolkit,
             input_messages=input_messages,
             chunk_iterator=chunk_iterator,
             format=format,
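To illustrate the narrowed provider-layer contract, a sketch under stated assumptions: the method name, message constructor, and Toolkit construction below are not shown in this diff. Callers now pass an already-built toolkit (never a raw tool sequence or None) and a resolved FormatSpec:

    provider = GoogleProvider()              # assumed constructor
    response = provider.call(                # method name assumed from the signatures above
        model_id="gemini-2.0-flash",         # hypothetical GoogleModelId value
        messages=[UserMessage(content="Hi")],  # assumed message constructor
        toolkit=Toolkit(tools=[]),           # required; an empty Toolkit replaces tools=None
        format=None,
    )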