mirascope 1.18.3__py3-none-any.whl → 1.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +20 -2
- mirascope/beta/openai/__init__.py +1 -1
- mirascope/beta/openai/realtime/__init__.py +1 -1
- mirascope/beta/openai/realtime/tool.py +1 -1
- mirascope/beta/rag/__init__.py +2 -2
- mirascope/beta/rag/base/__init__.py +2 -2
- mirascope/beta/rag/weaviate/__init__.py +1 -1
- mirascope/core/__init__.py +26 -8
- mirascope/core/anthropic/__init__.py +3 -3
- mirascope/core/anthropic/_utils/_calculate_cost.py +114 -47
- mirascope/core/anthropic/call_response.py +9 -1
- mirascope/core/anthropic/call_response_chunk.py +7 -0
- mirascope/core/anthropic/stream.py +3 -1
- mirascope/core/azure/__init__.py +2 -2
- mirascope/core/azure/_utils/_calculate_cost.py +4 -1
- mirascope/core/azure/call_response.py +9 -1
- mirascope/core/azure/call_response_chunk.py +5 -0
- mirascope/core/azure/stream.py +3 -1
- mirascope/core/base/__init__.py +11 -9
- mirascope/core/base/_utils/__init__.py +10 -10
- mirascope/core/base/_utils/_get_common_usage.py +8 -4
- mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +2 -2
- mirascope/core/base/_utils/_protocols.py +9 -8
- mirascope/core/base/call_response.py +22 -20
- mirascope/core/base/call_response_chunk.py +12 -1
- mirascope/core/base/stream.py +24 -21
- mirascope/core/base/tool.py +7 -5
- mirascope/core/base/types.py +22 -5
- mirascope/core/bedrock/__init__.py +3 -3
- mirascope/core/bedrock/_utils/_calculate_cost.py +4 -1
- mirascope/core/bedrock/call_response.py +8 -1
- mirascope/core/bedrock/call_response_chunk.py +5 -0
- mirascope/core/bedrock/stream.py +3 -1
- mirascope/core/cohere/__init__.py +2 -2
- mirascope/core/cohere/_utils/_calculate_cost.py +4 -3
- mirascope/core/cohere/call_response.py +9 -1
- mirascope/core/cohere/call_response_chunk.py +5 -0
- mirascope/core/cohere/stream.py +3 -1
- mirascope/core/gemini/__init__.py +2 -2
- mirascope/core/gemini/_utils/_calculate_cost.py +4 -1
- mirascope/core/gemini/_utils/_convert_message_params.py +1 -1
- mirascope/core/gemini/call_response.py +9 -1
- mirascope/core/gemini/call_response_chunk.py +5 -0
- mirascope/core/gemini/stream.py +3 -1
- mirascope/core/google/__init__.py +2 -2
- mirascope/core/google/_utils/_calculate_cost.py +141 -14
- mirascope/core/google/_utils/_convert_message_params.py +23 -51
- mirascope/core/google/_utils/_message_param_converter.py +34 -33
- mirascope/core/google/_utils/_validate_media_type.py +34 -0
- mirascope/core/google/call_response.py +26 -4
- mirascope/core/google/call_response_chunk.py +17 -9
- mirascope/core/google/stream.py +20 -2
- mirascope/core/groq/__init__.py +2 -2
- mirascope/core/groq/_utils/_calculate_cost.py +12 -11
- mirascope/core/groq/call_response.py +9 -1
- mirascope/core/groq/call_response_chunk.py +5 -0
- mirascope/core/groq/stream.py +3 -1
- mirascope/core/litellm/__init__.py +1 -1
- mirascope/core/litellm/_utils/_setup_call.py +7 -3
- mirascope/core/mistral/__init__.py +2 -2
- mirascope/core/mistral/_utils/_calculate_cost.py +10 -9
- mirascope/core/mistral/call_response.py +9 -1
- mirascope/core/mistral/call_response_chunk.py +5 -0
- mirascope/core/mistral/stream.py +3 -1
- mirascope/core/openai/__init__.py +2 -2
- mirascope/core/openai/_utils/_calculate_cost.py +78 -37
- mirascope/core/openai/call_params.py +13 -0
- mirascope/core/openai/call_response.py +14 -1
- mirascope/core/openai/call_response_chunk.py +12 -0
- mirascope/core/openai/stream.py +6 -4
- mirascope/core/vertex/__init__.py +1 -1
- mirascope/core/vertex/_utils/_calculate_cost.py +1 -0
- mirascope/core/vertex/_utils/_convert_message_params.py +1 -1
- mirascope/core/vertex/call_response.py +9 -1
- mirascope/core/vertex/call_response_chunk.py +5 -0
- mirascope/core/vertex/stream.py +3 -1
- mirascope/core/xai/__init__.py +28 -0
- mirascope/core/xai/_call.py +67 -0
- mirascope/core/xai/_utils/__init__.py +6 -0
- mirascope/core/xai/_utils/_calculate_cost.py +104 -0
- mirascope/core/xai/_utils/_setup_call.py +113 -0
- mirascope/core/xai/call_params.py +10 -0
- mirascope/core/xai/call_response.py +27 -0
- mirascope/core/xai/call_response_chunk.py +14 -0
- mirascope/core/xai/dynamic_config.py +8 -0
- mirascope/core/xai/py.typed +0 -0
- mirascope/core/xai/stream.py +57 -0
- mirascope/core/xai/tool.py +13 -0
- mirascope/integrations/_middleware_factory.py +6 -6
- mirascope/integrations/logfire/_utils.py +1 -1
- mirascope/llm/__init__.py +2 -2
- mirascope/llm/_protocols.py +34 -28
- mirascope/llm/call_response.py +16 -7
- mirascope/llm/llm_call.py +50 -46
- mirascope/llm/stream.py +43 -31
- mirascope/retries/__init__.py +1 -1
- mirascope/tools/__init__.py +2 -2
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/METADATA +3 -1
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/RECORD +101 -88
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/WHEEL +0 -0
- {mirascope-1.18.3.dist-info → mirascope-1.19.0.dist-info}/licenses/LICENSE +0 -0
@@ -78,6 +78,11 @@ class GeminiCallResponseChunk(
         """Returns the number of input tokens."""
         return None

+    @property
+    def cached_tokens(self) -> None:
+        """Returns the number of cached tokens."""
+        return None
+
     @property
     def output_tokens(self) -> None:
         """Returns the number of output tokens."""
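The `cached_tokens` stub above mirrors a pattern used across providers in this release: response and chunk types now expose a `cached_tokens` accessor, and providers whose streaming chunks carry no cache metadata simply return `None`. A minimal sketch of that pattern, with hypothetical class names (this is an assumption about the shared base-class design, not mirascope's actual classes):

```python
# Illustrative sketch only; class names are hypothetical, not mirascope's.
from abc import ABC, abstractmethod


class BaseChunk(ABC):
    @property
    @abstractmethod
    def cached_tokens(self) -> int | None:
        """Number of input tokens served from the provider's cache."""


class ChunkWithoutCacheInfo(BaseChunk):
    @property
    def cached_tokens(self) -> None:
        # This provider's streaming chunks report no cache metadata,
        # so the accessor exists but always returns None.
        return None


print(ChunkWithoutCacheInfo().cached_tokens)  # None
```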
mirascope/core/gemini/stream.py
CHANGED
@@ -69,7 +69,9 @@ class GeminiStream(
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )

     def _construct_message_param(
         self, tool_calls: list[FunctionCall] | None = None, content: str | None = None
mirascope/core/google/__init__.py
CHANGED
@@ -17,13 +17,13 @@ from .tool import GoogleTool
 GoogleMessageParam: TypeAlias = ContentDict | FunctionResponse | BaseMessageParam

 __all__ = [
-    "call",
-    "GoogleDynamicConfig",
     "GoogleCallParams",
     "GoogleCallResponse",
     "GoogleCallResponseChunk",
+    "GoogleDynamicConfig",
     "GoogleMessageParam",
     "GoogleStream",
     "GoogleTool",
+    "call",
     "google_call",
 ]
mirascope/core/google/_utils/_calculate_cost.py
CHANGED
@@ -2,7 +2,10 @@


 def calculate_cost(
-    input_tokens: int | float | None,
+    input_tokens: int | float | None,
+    cached_tokens: int | float | None,
+    output_tokens: int | float | None,
+    model: str,
 ) -> float | None:
     """Calculate the cost of a Google API call.

@@ -10,16 +13,31 @@ def calculate_cost(

     Pricing (per 1M tokens):

-    Model
-    gemini-2.0-
-    gemini-2.0-
-    gemini-
-    gemini-
-    gemini-
-    gemini-
+    Model                                Input (<128K)  Output (<128K)  Input (>128K)  Output (>128K)  Cached
+    gemini-2.0-pro                       $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-2.0-pro-preview-1206          $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-2.0-flash                     $0.10          $0.40           $0.10          $0.40           $0.0375
+    gemini-2.0-flash-latest              $0.10          $0.40           $0.10          $0.40           $0.0375
+    gemini-2.0-flash-001                 $0.10          $0.40           $0.10          $0.40           $0.0375
+    gemini-2.0-flash-lite                $0.075         $0.30           $0.075         $0.30           $0.0375
+    gemini-2.0-flash-lite-preview-02-05  $0.075         $0.30           $0.075         $0.30           $0.0375
+    gemini-1.5-pro                       $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-1.5-pro-latest                $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-1.5-pro-001                   $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-1.5-pro-002                   $1.25          $5.00           $2.50          $10.00          $0.625
+    gemini-1.5-flash                     $0.075         $0.30           $0.15          $0.60           $0.0375
+    gemini-1.5-flash-latest              $0.075         $0.30           $0.15          $0.60           $0.0375
+    gemini-1.5-flash-001                 $0.075         $0.30           $0.15          $0.60           $0.0375
+    gemini-1.5-flash-002                 $0.075         $0.30           $0.15          $0.60           $0.0375
+    gemini-1.5-flash-8b                  $0.0375        $0.15           $0.075         $0.30           $0.025
+    gemini-1.5-flash-8b-latest           $0.0375        $0.15           $0.075         $0.30           $0.025
+    gemini-1.5-flash-8b-001              $0.0375        $0.15           $0.075         $0.30           $0.025
+    gemini-1.5-flash-8b-002              $0.0375        $0.15           $0.075         $0.30           $0.025
+    gemini-1.0-pro                       $0.50          $1.50           $0.50          $1.50           $0.00

     Args:
         input_tokens: Number of input tokens
+        cached_tokens: Number of cached tokens
         output_tokens: Number of output tokens
         model: Model name to use for pricing calculation

@@ -27,47 +45,154 @@ def calculate_cost(
         Total cost in USD or None if invalid input
     """
     pricing = {
+        "gemini-2.0-pro": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
+        },
+        "gemini-2.0-pro-preview-1206": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
+        },
         "gemini-2.0-flash": {
             "prompt_short": 0.000_000_10,
             "completion_short": 0.000_000_40,
             "prompt_long": 0.000_000_10,
             "completion_long": 0.000_000_40,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-2.0-flash-latest": {
+            "prompt_short": 0.000_000_10,
+            "completion_short": 0.000_000_40,
+            "prompt_long": 0.000_000_10,
+            "completion_long": 0.000_000_40,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-2.0-flash-001": {
+            "prompt_short": 0.000_000_10,
+            "completion_short": 0.000_000_40,
+            "prompt_long": 0.000_000_10,
+            "completion_long": 0.000_000_40,
+            "cached": 0.000_000_037_5,
         },
         "gemini-2.0-flash-lite": {
             "prompt_short": 0.000_000_075,
             "completion_short": 0.000_000_30,
             "prompt_long": 0.000_000_075,
             "completion_long": 0.000_000_30,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-2.0-flash-lite-preview-02-05": {
+            "prompt_short": 0.000_000_075,
+            "completion_short": 0.000_000_30,
+            "prompt_long": 0.000_000_075,
+            "completion_long": 0.000_000_30,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-1.5-pro": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
+        },
+        "gemini-1.5-pro-latest": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
+        },
+        "gemini-1.5-pro-001": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
+        },
+        "gemini-1.5-pro-002": {
+            "prompt_short": 0.000_001_25,
+            "completion_short": 0.000_005,
+            "prompt_long": 0.000_002_5,
+            "completion_long": 0.000_01,
+            "cached": 0.000_000_625,
         },
         "gemini-1.5-flash": {
             "prompt_short": 0.000_000_075,
             "completion_short": 0.000_000_30,
             "prompt_long": 0.000_000_15,
             "completion_long": 0.000_000_60,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-1.5-flash-latest": {
+            "prompt_short": 0.000_000_075,
+            "completion_short": 0.000_000_30,
+            "prompt_long": 0.000_000_15,
+            "completion_long": 0.000_000_60,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-1.5-flash-001": {
+            "prompt_short": 0.000_000_075,
+            "completion_short": 0.000_000_30,
+            "prompt_long": 0.000_000_15,
+            "completion_long": 0.000_000_60,
+            "cached": 0.000_000_037_5,
+        },
+        "gemini-1.5-flash-002": {
+            "prompt_short": 0.000_000_075,
+            "completion_short": 0.000_000_30,
+            "prompt_long": 0.000_000_15,
+            "completion_long": 0.000_000_60,
+            "cached": 0.000_000_037_5,
         },
         "gemini-1.5-flash-8b": {
             "prompt_short": 0.000_000_037_5,
             "completion_short": 0.000_000_15,
             "prompt_long": 0.000_000_075,
             "completion_long": 0.000_000_30,
+            "cached": 0.000_000_025,
         },
-        "gemini-1.5-
-            "prompt_short": 0.
-            "completion_short": 0.
-            "prompt_long": 0.
-            "completion_long": 0.
+        "gemini-1.5-flash-8b-latest": {
+            "prompt_short": 0.000_000_037_5,
+            "completion_short": 0.000_000_15,
+            "prompt_long": 0.000_000_075,
+            "completion_long": 0.000_000_30,
+            "cached": 0.000_000_025,
+        },
+        "gemini-1.5-flash-8b-001": {
+            "prompt_short": 0.000_000_037_5,
+            "completion_short": 0.000_000_15,
+            "prompt_long": 0.000_000_075,
+            "completion_long": 0.000_000_30,
+            "cached": 0.000_000_025,
+        },
+        "gemini-1.5-flash-8b-002": {
+            "prompt_short": 0.000_000_037_5,
+            "completion_short": 0.000_000_15,
+            "prompt_long": 0.000_000_075,
+            "completion_long": 0.000_000_30,
+            "cached": 0.000_000_025,
         },
         "gemini-1.0-pro": {
             "prompt_short": 0.000_000_5,
             "completion_short": 0.000_001_5,
             "prompt_long": 0.000_000_5,
             "completion_long": 0.000_001_5,
+            "cached": 0.000_000,
         },
     }

     if input_tokens is None or output_tokens is None:
         return None

+    if cached_tokens is None:
+        cached_tokens = 0
+
     try:
         model_pricing = pricing[model]
     except KeyError:
@@ -77,12 +202,14 @@ def calculate_cost(
     use_long_context = input_tokens > 128_000

     prompt_price = model_pricing["prompt_long" if use_long_context else "prompt_short"]
+    cached_price = model_pricing["cached"]
     completion_price = model_pricing[
         "completion_long" if use_long_context else "completion_short"
     ]

     prompt_cost = input_tokens * prompt_price
+    cached_cost = cached_tokens * cached_price
     completion_cost = output_tokens * completion_price
-    total_cost = prompt_cost + completion_cost
+    total_cost = prompt_cost + cached_cost + completion_cost

     return total_cost
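As a quick sanity check of the formula above (not part of the package), the arithmetic for a `gemini-1.5-flash` call with 10,000 input tokens, 2,000 reported cached tokens, and 1,000 output tokens works out as follows; the example mirrors the function exactly, including the fact that cached tokens are priced on top of the full prompt-token count:

```python
# Worked example using the short-context prices shown in the diff above
# (gemini-1.5-flash: prompt $0.075/M, completion $0.30/M, cached $0.0375/M).
input_tokens = 10_000
cached_tokens = 2_000
output_tokens = 1_000

prompt_cost = input_tokens * 0.000_000_075      # 0.00075
cached_cost = cached_tokens * 0.000_000_037_5   # 0.000075
completion_cost = output_tokens * 0.000_000_30  # 0.0003
total_cost = prompt_cost + cached_cost + completion_cost

print(f"${total_cost:.6f}")  # $0.001125
```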
mirascope/core/google/_utils/_convert_message_params.py
CHANGED
@@ -16,50 +16,22 @@ from google.genai.types import (
 from ...base import BaseMessageParam
 from ...base._utils import get_audio_type, get_image_type
 from ...base._utils._parse_content_template import _load_media
-
-
-def _check_image_media_type(media_type: str) -> None:
-    """Raises a `ValueError` if the image media type is not supported."""
-    if media_type not in [
-        "image/jpeg",
-        "image/png",
-        "image/webp",
-        "image/heic",
-        "image/heif",
-    ]:
-        raise ValueError(
-            f"Unsupported image media type: {media_type}. "
-            "Google currently only supports JPEG, PNG, WebP, HEIC, "
-            "and HEIF images."
-        )
-
-
-def _check_audio_media_type(media_type: str) -> None:
-    """Raises a `ValueError` if the audio media type is not supported."""
-    if media_type not in [
-        "audio/wav",
-        "audio/mp3",
-        "audio/aiff",
-        "audio/aac",
-        "audio/ogg",
-        "audio/flac",
-    ]:
-        raise ValueError(
-            f"Unsupported audio media type: {media_type}. "
-            "Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
-            "and FLAC audio file types."
-        )
+from ._validate_media_type import _check_audio_media_type, _check_image_media_type


 def _over_file_size_limit(size: int) -> bool:
-    """Check if the total file size exceeds the limit (
-
+    """Check if the total file size exceeds the limit (10mb).
+
+    Google limit is 20MB but base64 adds 33% to the size.
+    """
+    return size > 10 * 1024 * 1024  # 10MB


 async def _convert_message_params_async(
     message_params: list[BaseMessageParam | ContentDict], client: Client
 ) -> list[ContentDict]:
     converted_message_params = []
+    total_payload_size = 0
     for message_param in message_params:
         if not isinstance(message_param, BaseMessageParam):
             converted_message_params.append(message_param)
@@ -83,7 +55,6 @@ async def _convert_message_params_async(
                 )
             else:
                 converted_content = []
-                total_file_size = 0
                 must_upload: dict[int, BlobDict] = {}
                 for index, part in enumerate(content):
                     if part.type == "text":
@@ -93,10 +64,10 @@ async def _convert_message_params_async(
                         blob_dict = BlobDict(data=part.image, mime_type=part.media_type)
                         converted_content.append(PartDict(inline_data=blob_dict))
                         image_size = len(part.image)
-
-                        if _over_file_size_limit(
+                        total_payload_size += image_size
+                        if _over_file_size_limit(total_payload_size):
                             must_upload[index] = blob_dict
-
+                            total_payload_size -= image_size
                     elif part.type == "image_url":
                         if (
                             client.vertexai
@@ -112,17 +83,17 @@ async def _convert_message_params_async(
                             )
                         else:
                             downloaded_image = _load_media(part.url)
-                            media_type = get_image_type(downloaded_image)
+                            media_type = f"image/{get_image_type(downloaded_image)}"
                             _check_image_media_type(media_type)
                             blob_dict = BlobDict(
                                 data=downloaded_image, mime_type=media_type
                             )
                             converted_content.append(PartDict(inline_data=blob_dict))
                             image_size = len(downloaded_image)
-
-                            if _over_file_size_limit(
+                            total_payload_size += image_size
+                            if _over_file_size_limit(total_payload_size):
                                 must_upload[index] = blob_dict
-
+                                total_payload_size -= image_size
                     elif part.type == "audio":
                         _check_audio_media_type(part.media_type)
                         audio_data = (
@@ -133,10 +104,10 @@ async def _convert_message_params_async(
                         blob_dict = BlobDict(data=audio_data, mime_type=part.media_type)
                         converted_content.append(PartDict(inline_data=blob_dict))
                         audio_size = len(audio_data)
-
-                        if _over_file_size_limit(
+                        total_payload_size += audio_size
+                        if _over_file_size_limit(total_payload_size):
                             must_upload[index] = blob_dict
-
+                            total_payload_size -= audio_size
                     elif part.type == "audio_url":
                         if (
                             client.vertexai
@@ -152,17 +123,17 @@ async def _convert_message_params_async(
                             )
                         else:
                             downloaded_audio = _load_media(part.url)
-                            media_type = get_audio_type(downloaded_audio)
+                            media_type = f"audio/{get_audio_type(downloaded_audio)}"
                             _check_audio_media_type(media_type)
                             blob_dict = BlobDict(
                                 data=downloaded_audio, mime_type=media_type
                             )
                             converted_content.append(PartDict(inline_data=blob_dict))
                             audio_size = len(downloaded_audio)
-
-                            if _over_file_size_limit(
+                            total_payload_size += audio_size
+                            if _over_file_size_limit(total_payload_size):
                                 must_upload[index] = blob_dict
-
+                                total_payload_size -= audio_size
                     else:
                         raise ValueError(
                             "Google currently only supports text, image, and audio parts. "
@@ -211,4 +182,5 @@ def convert_message_params(
             )
             return future.result()
     except RuntimeError:
-
+        ...
+    return asyncio.run(_convert_message_params_async(message_params, client))
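The change above replaces the old per-message `total_file_size` counter with a single `total_payload_size` running total across the whole request: each inline blob's size is added before the limit check, and when the 10MB threshold is crossed the blob is recorded in `must_upload` and its size subtracted again so it no longer counts against the inline budget. A standalone sketch of that bookkeeping, with hypothetical names and plain byte strings in place of the real `BlobDict` handling:

```python
# Standalone sketch of the running payload-size bookkeeping shown in the diff.
# Function and variable names are illustrative, not mirascope's actual API.
LIMIT = 10 * 1024 * 1024  # 10MB inline budget (20MB API limit minus ~33% base64 overhead)


def _over_file_size_limit(size: int) -> bool:
    return size > LIMIT


def split_inline_and_upload(blobs: list[bytes]) -> tuple[list[int], list[int]]:
    """Return (inline_indices, upload_indices) for a list of media payloads."""
    total_payload_size = 0
    inline: list[int] = []
    upload: list[int] = []
    for index, blob in enumerate(blobs):
        size = len(blob)
        total_payload_size += size
        if _over_file_size_limit(total_payload_size):
            # Too large to keep inline: mark for separate upload and give the
            # bytes back to the inline budget, as the diff does with must_upload.
            upload.append(index)
            total_payload_size -= size
        else:
            inline.append(index)
    return inline, upload


inline, upload = split_inline_and_upload([b"x" * (6 * 1024 * 1024), b"y" * (6 * 1024 * 1024)])
print(inline, upload)  # [0] [1]
```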
mirascope/core/google/_utils/_message_param_converter.py
CHANGED
@@ -13,35 +13,15 @@ from mirascope.core.base._utils._base_message_param_converter import (
     BaseMessageParamConverter,
 )
 from mirascope.core.base.message_param import (
+    AudioPart,
     AudioURLPart,
     ImageURLPart,
     ToolCallPart,
     ToolResultPart,
 )
-from mirascope.core.gemini._utils._message_param_converter import _is_audio_mime
 from mirascope.core.google._utils import convert_message_params

-
-
-def _is_image_mime(mime_type: str) -> bool:
-    return mime_type in ["image/jpeg", "image/png", "image/gif", "image/webp"]
-
-
-def _to_image_part(mime_type: str, data: bytes) -> ImagePart:
-    if not _is_image_mime(mime_type):
-        raise ValueError(
-            f"Unsupported image media type: {mime_type}. "
-            "Expected one of: image/jpeg, image/png, image/gif, image/webp."
-        )
-    return ImagePart(type="image", media_type=mime_type, image=data, detail=None)
-
-
-def _to_document_part(mime_type: str, data: bytes) -> DocumentPart:
-    if mime_type != "application/pdf":
-        raise ValueError(
-            f"Unsupported document media type: {mime_type}. "
-            "Only application/pdf is supported."
-        )
-    return DocumentPart(type="document", media_type=mime_type, document=data)
+from ._validate_media_type import _check_audio_media_type, _check_image_media_type


 class GoogleMessageParamConverter(BaseMessageParamConverter):
@@ -74,21 +54,42 @@ class GoogleMessageParamConverter(BaseMessageParamConverter):
                 if part.text:
                     content_list.append(TextPart(type="text", text=part.text))

-                elif part.inline_data:
-
-                    mime = blob.mime_type or ""
+                elif blob := part.inline_data:
+                    mime_type = blob.mime_type or ""
                     data = blob.data or b""
-                    if
-
-
-
+                    if mime_type.startswith("image/"):
+                        _check_image_media_type(mime_type)
+                        content_list.append(
+                            ImagePart(
+                                type="image",
+                                media_type=mime_type,
+                                image=data,
+                                detail=None,
+                            )
+                        )
+                    elif mime_type.startswith("audio/"):
+                        _check_audio_media_type(mime_type)
+                        content_list.append(
+                            AudioPart(
+                                type="audio",
+                                media_type=mime_type,
+                                audio=data,
+                            )
+                        )
+                    elif mime_type == "application/pdf":
+                        content_list.append(
+                            DocumentPart(
+                                type="document", media_type=mime_type, document=data
+                            )
+                        )
                     else:
                         raise ValueError(
-                            f"Unsupported inline_data mime type: {
+                            f"Unsupported inline_data mime type: {mime_type}. Cannot convert to BaseMessageParam."
                         )

-                elif part.file_data:
-
+                elif file_data := part.file_data:
+                    mime_type = file_data.mime_type or ""
+                    if mime_type.startswith("image/"):
                         content_list.append(
                             ImageURLPart(
                                 type="image_url",
@@ -96,7 +97,7 @@ class GoogleMessageParamConverter(BaseMessageParamConverter):
                                 detail=None,
                             )
                         )
-                    elif
+                    elif mime_type.startswith("audio/"):
                         content_list.append(
                             AudioURLPart(
                                 type="audio_url",
mirascope/core/google/_utils/_validate_media_type.py
ADDED
@@ -0,0 +1,34 @@
+"""Utilities for validating supported media types for Google models."""
+
+
+def _check_image_media_type(media_type: str) -> None:
+    """Raises a `ValueError` if the image media type is not supported."""
+    if media_type not in [
+        "image/jpeg",
+        "image/png",
+        "image/webp",
+        "image/heic",
+        "image/heif",
+    ]:
+        raise ValueError(
+            f"Unsupported image media type: {media_type}. "
+            "Google currently only supports JPEG, PNG, WebP, HEIC, "
+            "and HEIF images."
+        )
+
+
+def _check_audio_media_type(media_type: str) -> None:
+    """Raises a `ValueError` if the audio media type is not supported."""
+    if media_type not in [
+        "audio/wav",
+        "audio/mp3",
+        "audio/aiff",
+        "audio/aac",
+        "audio/ogg",
+        "audio/flac",
+    ]:
+        raise ValueError(
+            f"Unsupported audio media type: {media_type}. "
+            "Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
+            "and FLAC audio file types."
+        )
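The two validators are pure guard functions, so their behavior is easy to exercise directly. A short usage sketch (these are private helpers, imported here only to illustrate the accepted media types; the absolute module path follows from the file path shown above):

```python
# Illustration only: exercising the private validators added in this release.
from mirascope.core.google._utils._validate_media_type import (
    _check_audio_media_type,
    _check_image_media_type,
)

_check_image_media_type("image/png")   # passes silently
_check_audio_media_type("audio/flac")  # passes silently

try:
    _check_image_media_type("image/gif")  # GIF is not in the supported list
except ValueError as e:
    print(e)  # Unsupported image media type: image/gif. Google currently only supports ...
```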
mirascope/core/google/call_response.py
CHANGED
@@ -11,6 +11,7 @@ from google.genai.types import (
     ContentListUnionDict,
     FunctionResponseDict,
     GenerateContentResponse,
+    GenerateContentResponseUsageMetadata,
     PartDict,
     # Import manually SchemaDict to avoid Pydantic error
     SchemaDict,  # noqa: F401
@@ -68,11 +69,13 @@ class GoogleCallResponse(

     _provider = "google"

+    @computed_field
     @property
     def content(self) -> str:
         """Returns the contained string content for the 0th choice."""
         return self.response.candidates[0].content.parts[0].text  # pyright: ignore [reportOptionalSubscript, reportReturnType, reportOptionalMemberAccess, reportOptionalIterable]

+    @computed_field
     @property
     def finish_reasons(self) -> list[str]:
         """Returns the finish reasons of the response."""
@@ -83,6 +86,7 @@ class GoogleCallResponse(
             if candidate and candidate.finish_reason is not None
         ]

+    @computed_field
     @property
     def model(self) -> str:
         """Returns the model name.
@@ -90,8 +94,11 @@ class GoogleCallResponse(
         google.generativeai does not return model, so we return the model provided by
         the user.
         """
-        return
+        return (
+            self.response.model_version if self.response.model_version else self._model
+        )

+    @computed_field
     @property
     def id(self) -> str | None:
         """Returns the id of the response.
@@ -101,13 +108,14 @@ class GoogleCallResponse(
         return None

     @property
-    def usage(self) -> None:
+    def usage(self) -> GenerateContentResponseUsageMetadata | None:
         """Returns the usage of the chat completion.

         google.generativeai does not have Usage, so we return None
         """
-        return
+        return self.response.usage_metadata

+    @computed_field
     @property
     def input_tokens(self) -> int | None:
         """Returns the number of input tokens."""
@@ -117,6 +125,17 @@ class GoogleCallResponse(
             else None
         )

+    @computed_field
+    @property
+    def cached_tokens(self) -> int | None:
+        """Returns the number of cached tokens."""
+        return (
+            self.response.usage_metadata.cached_content_token_count
+            if self.response.usage_metadata
+            else None
+        )
+
+    @computed_field
     @property
     def output_tokens(self) -> int | None:
         """Returns the number of output tokens."""
@@ -126,10 +145,13 @@ class GoogleCallResponse(
             else None
         )

+    @computed_field
     @property
     def cost(self) -> float | None:
         """Returns the cost of the call."""
-        return calculate_cost(
+        return calculate_cost(
+            self.input_tokens, self.cached_tokens, self.output_tokens, self.model
+        )

     @computed_field
     @cached_property
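Taken together, the new `usage`, `cached_tokens`, and `cost` members let a completed call report cache-aware cost directly from the response's usage metadata. A hedged sketch of the guard pattern the diff uses, with simple stand-in objects rather than real `google.genai` response types:

```python
# Sketch of the usage-metadata guard from the diff, using stand-in objects.
# Only cached_content_token_count is taken from the diff; everything else is illustrative.
from dataclasses import dataclass


@dataclass
class FakeUsageMetadata:
    cached_content_token_count: int | None = None


@dataclass
class FakeResponse:
    usage_metadata: FakeUsageMetadata | None = None


def cached_tokens(response: FakeResponse) -> int | None:
    # Same shape as the new GoogleCallResponse.cached_tokens property:
    # guard on missing usage_metadata, then read cached_content_token_count.
    return (
        response.usage_metadata.cached_content_token_count
        if response.usage_metadata
        else None
    )


print(cached_tokens(FakeResponse()))                                              # None
print(cached_tokens(FakeResponse(FakeUsageMetadata(cached_content_token_count=128))))  # 128
```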