mirascope 1.17.0__py3-none-any.whl → 1.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. mirascope/core/__init__.py +4 -0
  2. mirascope/core/base/stream_config.py +1 -1
  3. mirascope/core/gemini/__init__.py +10 -0
  4. mirascope/core/google/__init__.py +29 -0
  5. mirascope/core/google/_call.py +67 -0
  6. mirascope/core/google/_call_kwargs.py +13 -0
  7. mirascope/core/google/_utils/__init__.py +16 -0
  8. mirascope/core/google/_utils/_calculate_cost.py +88 -0
  9. mirascope/core/google/_utils/_convert_common_call_params.py +39 -0
  10. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +27 -0
  11. mirascope/core/google/_utils/_convert_message_params.py +177 -0
  12. mirascope/core/google/_utils/_get_json_output.py +37 -0
  13. mirascope/core/google/_utils/_handle_stream.py +35 -0
  14. mirascope/core/google/_utils/_message_param_converter.py +153 -0
  15. mirascope/core/google/_utils/_setup_call.py +180 -0
  16. mirascope/core/google/call_params.py +22 -0
  17. mirascope/core/google/call_response.py +202 -0
  18. mirascope/core/google/call_response_chunk.py +97 -0
  19. mirascope/core/google/dynamic_config.py +26 -0
  20. mirascope/core/google/stream.py +128 -0
  21. mirascope/core/google/tool.py +104 -0
  22. mirascope/core/vertex/__init__.py +15 -0
  23. mirascope/llm/_protocols.py +1 -0
  24. mirascope/llm/llm_call.py +4 -0
  25. mirascope/llm/llm_override.py +16 -3
  26. {mirascope-1.17.0.dist-info → mirascope-1.18.0.dist-info}/METADATA +4 -1
  27. {mirascope-1.17.0.dist-info → mirascope-1.18.0.dist-info}/RECORD +29 -11
  28. {mirascope-1.17.0.dist-info → mirascope-1.18.0.dist-info}/WHEEL +0 -0
  29. {mirascope-1.17.0.dist-info → mirascope-1.18.0.dist-info}/licenses/LICENSE +0 -0
@@ -24,6 +24,9 @@ with suppress(ImportError):
24
24
  with suppress(ImportError):
25
25
  from . import cohere as cohere
26
26
 
27
+ with suppress(ImportError):
28
+ from . import google as google
29
+
27
30
  with suppress(ImportError):
28
31
  from . import gemini as gemini
29
32
 
@@ -57,6 +60,7 @@ __all__ = [
57
60
  "cohere",
58
61
  "FromCallArgs",
59
62
  "gemini",
63
+ "google",
60
64
  "groq",
61
65
  "litellm",
62
66
  "merge_decorators",
@@ -1,4 +1,4 @@
1
- from typing import TypedDict
1
+ from typing_extensions import TypedDict
2
2
 
3
3
 
4
4
  class StreamConfig(TypedDict):
@@ -1,5 +1,7 @@
1
1
  """The Mirascope Gemini Module."""
2
2
 
3
+ import inspect
4
+ import warnings
3
5
  from typing import TypeAlias
4
6
 
5
7
  from google.generativeai.protos import FunctionResponse
@@ -17,6 +19,14 @@ from .tool import GeminiTool
17
19
 
18
20
  GeminiMessageParam: TypeAlias = ContentDict | FunctionResponse | BaseMessageParam
19
21
 
22
+ warnings.warn(
23
+ inspect.cleandoc("""
24
+ The `mirascope.core.gemini` module is deprecated and will be removed in a future release.
25
+ Please use the `mirascope.core.google` module instead.
26
+ """),
27
+ category=DeprecationWarning,
28
+ )
29
+
20
30
  __all__ = [
21
31
  "call",
22
32
  "GeminiDynamicConfig",
@@ -0,0 +1,29 @@
1
+ """The Mirascope Google Module."""
2
+
3
+ from typing import TypeAlias
4
+
5
+ from google.genai.types import ContentDict, FunctionResponse
6
+
7
+ from ..base import BaseMessageParam
8
+ from ._call import google_call
9
+ from ._call import google_call as call
10
+ from .call_params import GoogleCallParams
11
+ from .call_response import GoogleCallResponse
12
+ from .call_response_chunk import GoogleCallResponseChunk
13
+ from .dynamic_config import GoogleDynamicConfig
14
+ from .stream import GoogleStream
15
+ from .tool import GoogleTool
16
+
17
+ GoogleMessageParam: TypeAlias = ContentDict | FunctionResponse | BaseMessageParam
18
+
19
+ __all__ = [
20
+ "call",
21
+ "GoogleDynamicConfig",
22
+ "GoogleCallParams",
23
+ "GoogleCallResponse",
24
+ "GoogleCallResponseChunk",
25
+ "GoogleMessageParam",
26
+ "GoogleStream",
27
+ "GoogleTool",
28
+ "google_call",
29
+ ]
@@ -0,0 +1,67 @@
1
+ """The `google_call` decorator for functions as LLM calls."""
2
+
3
+ from ..base import call_factory
4
+ from ._utils import (
5
+ get_json_output,
6
+ handle_stream,
7
+ handle_stream_async,
8
+ setup_call,
9
+ )
10
+ from .call_params import GoogleCallParams
11
+ from .call_response import GoogleCallResponse
12
+ from .call_response_chunk import GoogleCallResponseChunk
13
+ from .stream import GoogleStream
14
+ from .tool import GoogleTool
15
+
16
+ google_call = call_factory(
17
+ TCallResponse=GoogleCallResponse,
18
+ TCallResponseChunk=GoogleCallResponseChunk,
19
+ TStream=GoogleStream,
20
+ TToolType=GoogleTool,
21
+ default_call_params=GoogleCallParams(),
22
+ setup_call=setup_call,
23
+ get_json_output=get_json_output,
24
+ handle_stream=handle_stream, # pyright: ignore [reportArgumentType]
25
+ handle_stream_async=handle_stream_async, # pyright: ignore [reportArgumentType]
26
+ )
27
+ """A decorator for calling the Google API with a typed function.
28
+
29
+ usage docs: learn/calls.md
30
+
31
+ This decorator is used to wrap a typed function that calls the Google API. It parses
32
+ the prompt template of the wrapped function as the messages array and templates the input
33
+ arguments for the function into each message's template.
34
+
35
+ Example:
36
+
37
+ ```python
38
+ from mirascope.core import prompt_template
39
+ from mirascope.core.google import google_call
40
+
41
+
42
+ @google_call("gemini-1.5-flash")
43
+ def recommend_book(genre: str) -> str:
44
+ return f"Recommend a {genre} book"
45
+
46
+ response = recommend_book("fantasy")
47
+ print(response.content)
48
+ ```
49
+
50
+ Args:
51
+ model (str): The Google model to use in the API call.
52
+ stream (bool): Whether to stream the response from the API call.
53
+ tools (list[BaseTool | Callable]): The tools to use in the Google API call.
54
+ response_model (BaseModel | BaseType): The response model into which the response
55
+ should be structured.
56
+ output_parser (Callable[[GoogleCallResponse | ResponseModelT], Any]): A function
57
+ for parsing the call response whose value will be returned in place of the
58
+ original call response.
59
+ json_mode (bool): Whether to use JSON Mode.
60
+ client (object): An optional custom client to use in place of the default client.
61
+ call_params (GoogleCallParams): The `GoogleCallParams` call parameters to use in the
62
+ API call.
63
+
64
+ Returns:
65
+ decorator (Callable): The decorator for turning a typed function into a Google API
66
+ call.
67
+ """
@@ -0,0 +1,13 @@
1
+ """This module contains the type definition for the Google call keyword arguments."""
2
+
3
+ from collections.abc import Sequence
4
+
5
+ from google.genai.types import ContentOrDict, Tool
6
+
7
+ from ..base import BaseCallKwargs
8
+ from .call_params import GoogleCallParams
9
+
10
+
11
+ class GoogleCallKwargs(GoogleCallParams, BaseCallKwargs[Tool]):
12
+ model: str
13
+ contents: Sequence[ContentOrDict]
@@ -0,0 +1,16 @@
1
+ """Google utilities for decorator factories."""
2
+
3
+ from ._calculate_cost import calculate_cost
4
+ from ._convert_message_params import convert_message_params
5
+ from ._get_json_output import get_json_output
6
+ from ._handle_stream import handle_stream, handle_stream_async
7
+ from ._setup_call import setup_call
8
+
9
+ __all__ = [
10
+ "calculate_cost",
11
+ "convert_message_params",
12
+ "get_json_output",
13
+ "handle_stream",
14
+ "handle_stream_async",
15
+ "setup_call",
16
+ ]
@@ -0,0 +1,88 @@
1
+ """Calculate the cost of a Google API call."""
2
+
3
+
4
+ def calculate_cost(
5
+ input_tokens: int | float | None, output_tokens: int | float | None, model: str
6
+ ) -> float | None:
7
+ """Calculate the cost of a Google API call.
8
+
9
+ https://ai.google.dev/pricing
10
+
11
+ Pricing (per 1M tokens):
12
+
13
+ Model Input (<128K) Output (<128K) Input (>128K) Output (>128K)
14
+ gemini-2.0-flash $0.10 $0.40 $0.10 $0.40
15
+ gemini-2.0-flash-lite $0.075 $0.30 $0.075 $0.30
16
+ gemini-1.5-flash $0.075 $0.30 $0.15 $0.60
17
+ gemini-1.5-flash-8b $0.0375 $0.15 $0.075 $0.30
18
+ gemini-1.5-pro $1.25 $5.00 $2.50 $10.00
19
+ gemini-1.0-pro $0.50 $1.50 $0.50 $1.50
20
+
21
+ Args:
22
+ input_tokens: Number of input tokens
23
+ output_tokens: Number of output tokens
24
+ model: Model name to use for pricing calculation
25
+
26
+ Returns:
27
+ Total cost in USD or None if invalid input
28
+ """
29
+ pricing = {
30
+ "gemini-2.0-flash": {
31
+ "prompt_short": 0.000_000_10,
32
+ "completion_short": 0.000_000_40,
33
+ "prompt_long": 0.000_000_10,
34
+ "completion_long": 0.000_000_40,
35
+ },
36
+ "gemini-2.0-flash-lite": {
37
+ "prompt_short": 0.000_000_075,
38
+ "completion_short": 0.000_000_30,
39
+ "prompt_long": 0.000_000_075,
40
+ "completion_long": 0.000_000_30,
41
+ },
42
+ "gemini-1.5-flash": {
43
+ "prompt_short": 0.000_000_075,
44
+ "completion_short": 0.000_000_30,
45
+ "prompt_long": 0.000_000_15,
46
+ "completion_long": 0.000_000_60,
47
+ },
48
+ "gemini-1.5-flash-8b": {
49
+ "prompt_short": 0.000_000_037_5,
50
+ "completion_short": 0.000_000_15,
51
+ "prompt_long": 0.000_000_075,
52
+ "completion_long": 0.000_000_30,
53
+ },
54
+ "gemini-1.5-pro": {
55
+ "prompt_short": 0.000_001_25,
56
+ "completion_short": 0.000_005,
57
+ "prompt_long": 0.000_002_5,
58
+ "completion_long": 0.000_01,
59
+ },
60
+ "gemini-1.0-pro": {
61
+ "prompt_short": 0.000_000_5,
62
+ "completion_short": 0.000_001_5,
63
+ "prompt_long": 0.000_000_5,
64
+ "completion_long": 0.000_001_5,
65
+ },
66
+ }
67
+
68
+ if input_tokens is None or output_tokens is None:
69
+ return None
70
+
71
+ try:
72
+ model_pricing = pricing[model]
73
+ except KeyError:
74
+ return None
75
+
76
+ # Determine if we're using long context pricing
77
+ use_long_context = input_tokens > 128_000
78
+
79
+ prompt_price = model_pricing["prompt_long" if use_long_context else "prompt_short"]
80
+ completion_price = model_pricing[
81
+ "completion_long" if use_long_context else "completion_short"
82
+ ]
83
+
84
+ prompt_cost = input_tokens * prompt_price
85
+ completion_cost = output_tokens * completion_price
86
+ total_cost = prompt_cost + completion_cost
87
+
88
+ return total_cost
@@ -0,0 +1,39 @@
1
+ from typing import cast
2
+
3
+ from google.genai.types import (
4
+ GenerationConfigDict,
5
+ )
6
+
7
+ from ...base.call_params import CommonCallParams
8
+ from ..call_params import GoogleCallParams
9
+
10
+ GOOGLE_PARAM_MAPPING = {
11
+ "temperature": "temperature",
12
+ "max_tokens": "max_output_tokens",
13
+ "top_p": "top_p",
14
+ "stop": "stop_sequences",
15
+ }
16
+
17
+
18
+ def convert_common_call_params(common_params: CommonCallParams) -> GoogleCallParams:
19
+ """Convert CommonCallParams to Google parameters."""
20
+ generation_config = {}
21
+
22
+ for key, value in common_params.items():
23
+ if key not in GOOGLE_PARAM_MAPPING or value is None:
24
+ continue
25
+
26
+ if key == "stop":
27
+ generation_config["stop_sequences"] = (
28
+ [value] if isinstance(value, str) else value
29
+ )
30
+ else:
31
+ generation_config[GOOGLE_PARAM_MAPPING[key]] = value
32
+
33
+ if not generation_config:
34
+ return cast(GoogleCallParams, {})
35
+
36
+ return cast(
37
+ GoogleCallParams,
38
+ {"generation_config": cast(GenerationConfigDict, generation_config)},
39
+ )
@@ -0,0 +1,27 @@
1
+ from mirascope.core.base._utils._convert_provider_finish_reason_to_finish_reason import (
2
+ FinishReasonMappingValue,
3
+ _convert_finish_reasons_to_common_finish_reasons_from_mapping,
4
+ )
5
+ from mirascope.core.base.types import FinishReason
6
+
7
+ _FinishReasonMapping: dict[str, FinishReasonMappingValue] = {
8
+ "FINISH_REASON_UNSPECIFIED": "stop",
9
+ "STOP": "stop",
10
+ "MAX_TOKENS": "length",
11
+ "SAFETY": "content_filter",
12
+ "RECITATION": "stop",
13
+ "OTHER": "stop",
14
+ "BLOCKLIST": "stop",
15
+ "PROHIBITED_CONTENT": "stop",
16
+ "SPII": "stop",
17
+ "MALFORMED_FUNCTION_CALL": "stop",
18
+ }
19
+
20
+
21
+ def _convert_finish_reasons_to_common_finish_reasons(
22
+ finish_reasons: list[str],
23
+ ) -> list[FinishReason] | None:
24
+ """Provider-agnostic finish reasons."""
25
+ return _convert_finish_reasons_to_common_finish_reasons_from_mapping(
26
+ finish_reasons, _FinishReasonMapping
27
+ )
@@ -0,0 +1,177 @@
1
+ """Utility for converting `BaseMessageParam` to `ContentsType`"""
2
+
3
+ import base64
4
+ import io
5
+
6
+ import PIL.Image
7
+ from google.genai import Client
8
+ from google.genai.types import BlobDict, ContentDict, FileDataDict, PartDict
9
+
10
+ from ...base import BaseMessageParam
11
+ from ...base._utils import get_audio_type
12
+ from ...base._utils._parse_content_template import _load_media
13
+
14
+
15
+ def convert_message_params(
16
+ message_params: list[BaseMessageParam | ContentDict], client: Client
17
+ ) -> list[ContentDict]:
18
+ converted_message_params = []
19
+ for message_param in message_params:
20
+ if not isinstance(message_param, BaseMessageParam):
21
+ converted_message_params.append(message_param)
22
+ elif (role := message_param.role) == "system":
23
+ if not isinstance(message_param.content, str):
24
+ raise ValueError(
25
+ "System message content must be a single text string."
26
+ ) # pragma: no cover
27
+ converted_message_params += [
28
+ {
29
+ "role": "system",
30
+ "parts": [PartDict(text=message_param.content)],
31
+ }
32
+ ]
33
+ elif isinstance((content := message_param.content), str):
34
+ converted_message_params.append(
35
+ {
36
+ "role": role if role == "user" else "model",
37
+ "parts": [PartDict(text=content)],
38
+ }
39
+ )
40
+ else:
41
+ converted_content = []
42
+ for part in content:
43
+ if part.type == "text":
44
+ converted_content.append(PartDict(text=part.text))
45
+ elif part.type == "image":
46
+ if part.media_type not in [
47
+ "image/jpeg",
48
+ "image/png",
49
+ "image/webp",
50
+ "image/heic",
51
+ "image/heif",
52
+ ]:
53
+ raise ValueError(
54
+ f"Unsupported image media type: {part.media_type}. "
55
+ "Google currently only supports JPEG, PNG, WebP, HEIC, "
56
+ "and HEIF images."
57
+ )
58
+ converted_content.append(
59
+ PartDict(
60
+ inline_data=BlobDict(
61
+ data=part.image, mime_type=part.media_type
62
+ )
63
+ )
64
+ )
65
+ elif part.type == "image_url":
66
+ if part.url.startswith(("https://", "http://")):
67
+ downloaded_image = io.BytesIO(_load_media(part.url))
68
+ image = PIL.Image.open(downloaded_image)
69
+ media_type = (
70
+ PIL.Image.MIME[image.format]
71
+ if image.format
72
+ else "image/unknown"
73
+ )
74
+ if media_type not in [
75
+ "image/jpeg",
76
+ "image/png",
77
+ "image/webp",
78
+ "image/heic",
79
+ "image/heif",
80
+ ]:
81
+ raise ValueError(
82
+ f"Unsupported image media type: {media_type}. "
83
+ "Google currently only supports JPEG, PNG, WebP, HEIC, "
84
+ "and HEIF images."
85
+ )
86
+ if client.vertexai:
87
+ uri = part.url
88
+ else:
89
+ downloaded_image.seek(0)
90
+ file_ref = client.files.upload(
91
+ file=downloaded_image, config={"mime_type": media_type}
92
+ )
93
+ uri = file_ref.uri
94
+ media_type = file_ref.mime_type
95
+
96
+ converted_content.append(
97
+ PartDict(
98
+ file_data=FileDataDict(
99
+ file_uri=uri, mime_type=media_type
100
+ )
101
+ )
102
+ )
103
+ else:
104
+ media_type = "image/unknown"
105
+ uri = part.url
106
+ converted_content.append(
107
+ PartDict(
108
+ file_data=FileDataDict(
109
+ file_uri=uri, mime_type=media_type
110
+ )
111
+ )
112
+ )
113
+ elif part.type == "audio":
114
+ if part.media_type not in [
115
+ "audio/wav",
116
+ "audio/mp3",
117
+ "audio/aiff",
118
+ "audio/aac",
119
+ "audio/ogg",
120
+ "audio/flac",
121
+ ]:
122
+ raise ValueError(
123
+ f"Unsupported audio media type: {part.media_type}. "
124
+ "Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
125
+ "and FLAC audio file types."
126
+ )
127
+ converted_content.append(
128
+ PartDict(
129
+ inline_data=BlobDict(
130
+ data=part.audio
131
+ if isinstance(part.audio, bytes)
132
+ else base64.b64decode(part.audio),
133
+ mime_type=part.media_type,
134
+ )
135
+ )
136
+ )
137
+ elif part.type == "audio_url":
138
+ if part.url.startswith(("https://", "http://")):
139
+ audio = _load_media(part.url)
140
+ audio_type = get_audio_type(audio)
141
+ if audio_type not in [
142
+ "audio/wav",
143
+ "audio/mp3",
144
+ "audio/aiff",
145
+ "audio/aac",
146
+ "audio/ogg",
147
+ "audio/flac",
148
+ ]:
149
+ raise ValueError(
150
+ f"Unsupported audio media type: {audio_type}. "
151
+ "Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
152
+ "and FLAC audio file types."
153
+ )
154
+ converted_content.append(
155
+ {"mime_type": audio_type, "data": audio}
156
+ )
157
+ else:
158
+ audio_type = "audio/unknown"
159
+ converted_content.append(
160
+ PartDict(
161
+ file_data=FileDataDict(
162
+ file_uri=part.url, mime_type=audio_type
163
+ )
164
+ )
165
+ )
166
+ else:
167
+ raise ValueError(
168
+ "Google currently only supports text, image, and audio parts. "
169
+ f"Part provided: {part.type}"
170
+ )
171
+ converted_message_params.append(
172
+ {
173
+ "role": role if role == "user" else "model",
174
+ "parts": converted_content,
175
+ }
176
+ )
177
+ return converted_message_params
@@ -0,0 +1,37 @@
1
+ """Get JSON output from a Google response."""
2
+
3
+ import json
4
+
5
+ from proto.marshal.collections import RepeatedComposite
6
+
7
+ from ..call_response import GoogleCallResponse
8
+ from ..call_response_chunk import GoogleCallResponseChunk
9
+
10
+
11
+ def get_json_output(
12
+ response: GoogleCallResponse | GoogleCallResponseChunk, json_mode: bool
13
+ ) -> str:
14
+ """Extracts the JSON output from a Google response."""
15
+ if isinstance(response, GoogleCallResponse):
16
+ if json_mode and (content := response.content):
17
+ json_start = content.index("{")
18
+ json_end = content.rfind("}")
19
+ return content[json_start : json_end + 1]
20
+ elif tool_calls := [
21
+ function_call
22
+ for function_call in (response.response.function_calls or []) # pyright: ignore [reportOptionalSubscript, reportOptionalIterable, reportOptionalMemberAccess]
23
+ if function_call.args # pyright: ignore [reportOptionalSubscript, reportOptionalIterable, reportOptionalMemberAccess]
24
+ ]:
25
+ return json.dumps(
26
+ {
27
+ k: v if not isinstance(v, RepeatedComposite) else list(v)
28
+ for k, v in (tool_calls[0].args or {}).items() # pyright: ignore [reportOptionalMemberAccess]
29
+ }
30
+ if isinstance(tool_calls, list) and tool_calls[0]
31
+ else {}
32
+ )
33
+ else:
34
+ raise ValueError("No tool call or JSON object found in response.")
35
+ elif not json_mode:
36
+ raise ValueError("Google only supports structured streaming in json mode.")
37
+ return response.content
@@ -0,0 +1,35 @@
1
+ """Handles streaming content and tools from the Google API."""
2
+
3
+ from collections.abc import AsyncGenerator, Generator
4
+
5
+ from google.genai.types import GenerateContentResponse
6
+
7
+ from ..call_response_chunk import GoogleCallResponseChunk
8
+ from ..tool import GoogleTool
9
+
10
+
11
+ def handle_stream(
12
+ stream: Generator[GenerateContentResponse, None, None],
13
+ tool_types: list[type[GoogleTool]] | None = None,
14
+ partial_tools: bool = False,
15
+ ) -> Generator[tuple[GoogleCallResponseChunk, None], None, None]:
16
+ """Iterates over the stream and constructs tools as they are streamed.
17
+
18
+ Note: google does not currently support streaming tools.
19
+ """
20
+ for chunk in stream:
21
+ yield GoogleCallResponseChunk(chunk=chunk), None
22
+
23
+
24
+ async def handle_stream_async(
25
+ stream: AsyncGenerator[GenerateContentResponse, None],
26
+ tool_types: list[type[GoogleTool]] | None = None,
27
+ partial_tools: bool = False,
28
+ ) -> AsyncGenerator[tuple[GoogleCallResponseChunk, None], None]:
29
+ """
30
+ Async iterator over the stream and constructs tools as they are streamed.
31
+
32
+ Note: google does not currently support streaming tools.
33
+ """
34
+ async for chunk in stream:
35
+ yield GoogleCallResponseChunk(chunk=chunk), None