mirascope 1.16.9__py3-none-any.whl → 1.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/__init__.py +20 -1
- mirascope/core/__init__.py +4 -0
- mirascope/core/anthropic/_utils/_convert_message_params.py +13 -0
- mirascope/core/azure/_utils/_convert_message_params.py +10 -0
- mirascope/core/azure/_utils/_message_param_converter.py +46 -12
- mirascope/core/base/__init__.py +4 -0
- mirascope/core/base/_utils/_convert_messages_to_message_params.py +36 -3
- mirascope/core/base/_utils/_parse_content_template.py +35 -9
- mirascope/core/base/message_param.py +30 -2
- mirascope/core/base/messages.py +10 -0
- mirascope/core/base/stream_config.py +1 -1
- mirascope/core/bedrock/_utils/_convert_message_params.py +18 -1
- mirascope/core/gemini/__init__.py +10 -0
- mirascope/core/gemini/_utils/_convert_message_params.py +48 -5
- mirascope/core/gemini/_utils/_message_param_converter.py +51 -5
- mirascope/core/gemini/_utils/_setup_call.py +12 -2
- mirascope/core/google/__init__.py +29 -0
- mirascope/core/google/_call.py +67 -0
- mirascope/core/google/_call_kwargs.py +13 -0
- mirascope/core/google/_utils/__init__.py +16 -0
- mirascope/core/google/_utils/_calculate_cost.py +88 -0
- mirascope/core/google/_utils/_convert_common_call_params.py +39 -0
- mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +27 -0
- mirascope/core/google/_utils/_convert_message_params.py +177 -0
- mirascope/core/google/_utils/_get_json_output.py +37 -0
- mirascope/core/google/_utils/_handle_stream.py +35 -0
- mirascope/core/google/_utils/_message_param_converter.py +153 -0
- mirascope/core/google/_utils/_setup_call.py +180 -0
- mirascope/core/google/call_params.py +22 -0
- mirascope/core/google/call_response.py +202 -0
- mirascope/core/google/call_response_chunk.py +97 -0
- mirascope/core/google/dynamic_config.py +26 -0
- mirascope/core/google/stream.py +128 -0
- mirascope/core/google/tool.py +104 -0
- mirascope/core/groq/_utils/_convert_message_params.py +9 -0
- mirascope/core/groq/_utils/_message_param_converter.py +9 -2
- mirascope/core/mistral/_utils/_convert_message_params.py +7 -0
- mirascope/core/mistral/_utils/_message_param_converter.py +41 -35
- mirascope/core/openai/_utils/_convert_message_params.py +38 -1
- mirascope/core/openai/_utils/_message_param_converter.py +28 -4
- mirascope/core/vertex/__init__.py +15 -0
- mirascope/core/vertex/_utils/_convert_message_params.py +56 -6
- mirascope/core/vertex/_utils/_message_param_converter.py +13 -5
- mirascope/core/vertex/_utils/_setup_call.py +10 -1
- mirascope/llm/_protocols.py +1 -0
- mirascope/llm/call_response.py +5 -1
- mirascope/llm/llm_call.py +4 -0
- mirascope/llm/llm_override.py +16 -3
- mirascope/retries/__init__.py +5 -0
- mirascope/retries/fallback.py +128 -0
- {mirascope-1.16.9.dist-info → mirascope-1.18.0.dist-info}/METADATA +4 -1
- {mirascope-1.16.9.dist-info → mirascope-1.18.0.dist-info}/RECORD +54 -35
- {mirascope-1.16.9.dist-info → mirascope-1.18.0.dist-info}/WHEEL +0 -0
- {mirascope-1.16.9.dist-info → mirascope-1.18.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -8,7 +8,14 @@ from google.generativeai.types import (
|
|
|
8
8
|
)
|
|
9
9
|
|
|
10
10
|
from mirascope.core import BaseMessageParam
|
|
11
|
-
from mirascope.core.base import
|
|
11
|
+
from mirascope.core.base import (
|
|
12
|
+
AudioPart,
|
|
13
|
+
AudioURLPart,
|
|
14
|
+
DocumentPart,
|
|
15
|
+
ImagePart,
|
|
16
|
+
ImageURLPart,
|
|
17
|
+
TextPart,
|
|
18
|
+
)
|
|
12
19
|
from mirascope.core.base._utils._base_message_param_converter import (
|
|
13
20
|
BaseMessageParamConverter,
|
|
14
21
|
)
|
|
@@ -29,6 +36,28 @@ def _to_image_part(mime_type: str, data: bytes) -> ImagePart:
|
|
|
29
36
|
return ImagePart(type="image", media_type=mime_type, image=data, detail=None)
|
|
30
37
|
|
|
31
38
|
|
|
39
|
+
def _is_audio_mime(mime_type: str) -> bool:
|
|
40
|
+
return mime_type in [
|
|
41
|
+
"audio/wav",
|
|
42
|
+
"audio/mp3",
|
|
43
|
+
"audio/wav",
|
|
44
|
+
"audio/mp3",
|
|
45
|
+
"audio/aiff",
|
|
46
|
+
"audio/aac",
|
|
47
|
+
"audio/ogg",
|
|
48
|
+
"audio/flac",
|
|
49
|
+
]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def _to_audio_part(mime_type: str, data: bytes) -> AudioPart:
    """Build an `AudioPart` from raw audio bytes, validating the MIME type."""
    if _is_audio_mime(mime_type):
        return AudioPart(type="audio", media_type=mime_type, audio=data)
    raise ValueError(
        f"Unsupported audio media type: {mime_type}. "
        "Expected one of: audio/wav, audio/mp3, audio/aiff, audio/aac, audio/ogg, audio/flac."
    )
|
|
59
|
+
|
|
60
|
+
|
|
32
61
|
def _to_document_part(mime_type: str, data: bytes) -> DocumentPart:
|
|
33
62
|
if mime_type != "application/pdf":
|
|
34
63
|
raise ValueError(
|
|
@@ -77,6 +106,8 @@ class GeminiMessageParamConverter(BaseMessageParamConverter):
|
|
|
77
106
|
data = blob.data
|
|
78
107
|
if _is_image_mime(mime):
|
|
79
108
|
content_list.append(_to_image_part(mime, data))
|
|
109
|
+
elif _is_audio_mime(mime):
|
|
110
|
+
content_list.append(_to_audio_part(mime, data))
|
|
80
111
|
elif mime == "application/pdf":
|
|
81
112
|
content_list.append(_to_document_part(mime, data))
|
|
82
113
|
else:
|
|
@@ -85,10 +116,25 @@ class GeminiMessageParamConverter(BaseMessageParamConverter):
|
|
|
85
116
|
)
|
|
86
117
|
|
|
87
118
|
elif part.file_data:
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
119
|
+
if _is_image_mime(part.file_data.mime_type):
|
|
120
|
+
content_list.append(
|
|
121
|
+
ImageURLPart(
|
|
122
|
+
type="image_url",
|
|
123
|
+
url=part.file_data.file_uri,
|
|
124
|
+
detail=None,
|
|
125
|
+
)
|
|
126
|
+
)
|
|
127
|
+
elif _is_audio_mime(part.file_data.mime_type):
|
|
128
|
+
content_list.append(
|
|
129
|
+
AudioURLPart(
|
|
130
|
+
type="audio_url",
|
|
131
|
+
url=part.file_data.file_uri,
|
|
132
|
+
)
|
|
133
|
+
)
|
|
134
|
+
else:
|
|
135
|
+
raise ValueError(
|
|
136
|
+
f"Unsupported file_data mime type: {part.file_data.mime_type}. Cannot convert to BaseMessageParam."
|
|
137
|
+
)
|
|
92
138
|
elif part.function_call:
|
|
93
139
|
converted.append(
|
|
94
140
|
BaseMessageParam(
|
|
@@ -11,7 +11,7 @@ from google.generativeai.types import (
|
|
|
11
11
|
GenerateContentResponse,
|
|
12
12
|
GenerationConfigDict,
|
|
13
13
|
)
|
|
14
|
-
from google.generativeai.types.content_types import ToolConfigDict
|
|
14
|
+
from google.generativeai.types.content_types import ToolConfigDict, to_content
|
|
15
15
|
from pydantic import BaseModel
|
|
16
16
|
|
|
17
17
|
from ...base import BaseMessageParam, BaseTool, _utils
|
|
@@ -108,6 +108,7 @@ def setup_call(
|
|
|
108
108
|
call_kwargs = cast(GeminiCallKwargs, base_call_kwargs)
|
|
109
109
|
messages = cast(list[BaseMessageParam | ContentDict], messages)
|
|
110
110
|
messages = convert_message_params(messages)
|
|
111
|
+
|
|
111
112
|
if json_mode:
|
|
112
113
|
generation_config = call_kwargs.get("generation_config", {})
|
|
113
114
|
if is_dataclass(generation_config):
|
|
@@ -125,11 +126,20 @@ def setup_call(
|
|
|
125
126
|
"allowed_function_names": [tool_types[0]._name()],
|
|
126
127
|
}
|
|
127
128
|
call_kwargs["tool_config"] = tool_config
|
|
128
|
-
call_kwargs |= {"contents": messages}
|
|
129
129
|
|
|
130
130
|
if client is None:
|
|
131
131
|
client = GenerativeModel(model_name=model)
|
|
132
132
|
|
|
133
|
+
if messages and messages[0]["role"] == "system":
|
|
134
|
+
system_instruction = client._system_instruction
|
|
135
|
+
system_instruction = (
|
|
136
|
+
list(system_instruction.parts) if system_instruction else []
|
|
137
|
+
)
|
|
138
|
+
system_instruction.extend(messages.pop(0)["parts"]) # pyright: ignore [reportArgumentType]
|
|
139
|
+
client._system_instruction = to_content(system_instruction) # pyright: ignore [reportArgumentType]
|
|
140
|
+
|
|
141
|
+
call_kwargs |= {"contents": messages}
|
|
142
|
+
|
|
133
143
|
create = (
|
|
134
144
|
get_async_create_fn(client.generate_content_async)
|
|
135
145
|
if fn_is_async(fn)
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""The Mirascope Google Module."""
|
|
2
|
+
|
|
3
|
+
from typing import TypeAlias
|
|
4
|
+
|
|
5
|
+
from google.genai.types import ContentDict, FunctionResponse
|
|
6
|
+
|
|
7
|
+
from ..base import BaseMessageParam
|
|
8
|
+
from ._call import google_call
|
|
9
|
+
from ._call import google_call as call
|
|
10
|
+
from .call_params import GoogleCallParams
|
|
11
|
+
from .call_response import GoogleCallResponse
|
|
12
|
+
from .call_response_chunk import GoogleCallResponseChunk
|
|
13
|
+
from .dynamic_config import GoogleDynamicConfig
|
|
14
|
+
from .stream import GoogleStream
|
|
15
|
+
from .tool import GoogleTool
|
|
16
|
+
|
|
17
|
+
GoogleMessageParam: TypeAlias = ContentDict | FunctionResponse | BaseMessageParam
|
|
18
|
+
|
|
19
|
+
__all__ = [
|
|
20
|
+
"call",
|
|
21
|
+
"GoogleDynamicConfig",
|
|
22
|
+
"GoogleCallParams",
|
|
23
|
+
"GoogleCallResponse",
|
|
24
|
+
"GoogleCallResponseChunk",
|
|
25
|
+
"GoogleMessageParam",
|
|
26
|
+
"GoogleStream",
|
|
27
|
+
"GoogleTool",
|
|
28
|
+
"google_call",
|
|
29
|
+
]
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
"""The `google_call` decorator for functions as LLM calls."""
|
|
2
|
+
|
|
3
|
+
from ..base import call_factory
|
|
4
|
+
from ._utils import (
|
|
5
|
+
get_json_output,
|
|
6
|
+
handle_stream,
|
|
7
|
+
handle_stream_async,
|
|
8
|
+
setup_call,
|
|
9
|
+
)
|
|
10
|
+
from .call_params import GoogleCallParams
|
|
11
|
+
from .call_response import GoogleCallResponse
|
|
12
|
+
from .call_response_chunk import GoogleCallResponseChunk
|
|
13
|
+
from .stream import GoogleStream
|
|
14
|
+
from .tool import GoogleTool
|
|
15
|
+
|
|
16
|
+
google_call = call_factory(
|
|
17
|
+
TCallResponse=GoogleCallResponse,
|
|
18
|
+
TCallResponseChunk=GoogleCallResponseChunk,
|
|
19
|
+
TStream=GoogleStream,
|
|
20
|
+
TToolType=GoogleTool,
|
|
21
|
+
default_call_params=GoogleCallParams(),
|
|
22
|
+
setup_call=setup_call,
|
|
23
|
+
get_json_output=get_json_output,
|
|
24
|
+
handle_stream=handle_stream, # pyright: ignore [reportArgumentType]
|
|
25
|
+
handle_stream_async=handle_stream_async, # pyright: ignore [reportArgumentType]
|
|
26
|
+
)
|
|
27
|
+
"""A decorator for calling the Google API with a typed function.
|
|
28
|
+
|
|
29
|
+
usage docs: learn/calls.md
|
|
30
|
+
|
|
31
|
+
This decorator is used to wrap a typed function that calls the Google API. It parses
|
|
32
|
+
the prompt template of the wrapped function as the messages array and templates the input
|
|
33
|
+
arguments for the function into each message's template.
|
|
34
|
+
|
|
35
|
+
Example:
|
|
36
|
+
|
|
37
|
+
```python
|
|
38
|
+
from mirascope.core import prompt_template
|
|
39
|
+
from mirascope.core.google import google_call
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@google_call("google-1.5-flash")
|
|
43
|
+
def recommend_book(genre: str) -> str:
|
|
44
|
+
return f"Recommend a {genre} book"
|
|
45
|
+
|
|
46
|
+
response = recommend_book("fantasy")
|
|
47
|
+
print(response.content)
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
Args:
|
|
51
|
+
model (str): The Google model to use in the API call.
|
|
52
|
+
stream (bool): Whether to stream the response from the API call.
|
|
53
|
+
tools (list[BaseTool | Callable]): The tools to use in the Google API call.
|
|
54
|
+
response_model (BaseModel | BaseType): The response model into which the response
|
|
55
|
+
should be structured.
|
|
56
|
+
output_parser (Callable[[GoogleCallResponse | ResponseModelT], Any]): A function
|
|
57
|
+
for parsing the call response whose value will be returned in place of the
|
|
58
|
+
original call response.
|
|
59
|
+
json_modem (bool): Whether to use JSON Mode.
|
|
60
|
+
client (object): An optional custom client to use in place of the default client.
|
|
61
|
+
call_params (GoogleCallParams): The `GoogleCallParams` call parameters to use in the
|
|
62
|
+
API call.
|
|
63
|
+
|
|
64
|
+
Returns:
|
|
65
|
+
decorator (Callable): The decorator for turning a typed function into a Google API
|
|
66
|
+
call.
|
|
67
|
+
"""
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""This module contains the type definition for the Google call keyword arguments."""
|
|
2
|
+
|
|
3
|
+
from collections.abc import Sequence
|
|
4
|
+
|
|
5
|
+
from google.genai.types import ContentOrDict, Tool
|
|
6
|
+
|
|
7
|
+
from ..base import BaseCallKwargs
|
|
8
|
+
from .call_params import GoogleCallParams
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class GoogleCallKwargs(GoogleCallParams, BaseCallKwargs[Tool]):
|
|
12
|
+
model: str
|
|
13
|
+
contents: Sequence[ContentOrDict]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""Google utilities for decorator factories."""
|
|
2
|
+
|
|
3
|
+
from ._calculate_cost import calculate_cost
|
|
4
|
+
from ._convert_message_params import convert_message_params
|
|
5
|
+
from ._get_json_output import get_json_output
|
|
6
|
+
from ._handle_stream import handle_stream, handle_stream_async
|
|
7
|
+
from ._setup_call import setup_call
|
|
8
|
+
|
|
9
|
+
__all__ = [
|
|
10
|
+
"calculate_cost",
|
|
11
|
+
"convert_message_params",
|
|
12
|
+
"get_json_output",
|
|
13
|
+
"handle_stream",
|
|
14
|
+
"handle_stream_async",
|
|
15
|
+
"setup_call",
|
|
16
|
+
]
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"""Calculate the cost of a Gemini API call."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def calculate_cost(
|
|
5
|
+
input_tokens: int | float | None, output_tokens: int | float | None, model: str
|
|
6
|
+
) -> float | None:
|
|
7
|
+
"""Calculate the cost of a Google API call.
|
|
8
|
+
|
|
9
|
+
https://ai.google.dev/pricing
|
|
10
|
+
|
|
11
|
+
Pricing (per 1M tokens):
|
|
12
|
+
|
|
13
|
+
Model Input (<128K) Output (<128K) Input (>128K) Output (>128K)
|
|
14
|
+
gemini-2.0-flash $0.10 $0.40 $0.10 $0.40
|
|
15
|
+
gemini-2.0-flash-lite $0.075 $0.30 $0.075 $0.30
|
|
16
|
+
gemini-1.5-flash $0.075 $0.30 $0.15 $0.60
|
|
17
|
+
gemini-1.5-flash-8b $0.0375 $0.15 $0.075 $0.30
|
|
18
|
+
gemini-1.5-pro $1.25 $5.00 $2.50 $10.00
|
|
19
|
+
gemini-1.0-pro $0.50 $1.50 $0.50 $1.50
|
|
20
|
+
|
|
21
|
+
Args:
|
|
22
|
+
input_tokens: Number of input tokens
|
|
23
|
+
output_tokens: Number of output tokens
|
|
24
|
+
model: Model name to use for pricing calculation
|
|
25
|
+
|
|
26
|
+
Returns:
|
|
27
|
+
Total cost in USD or None if invalid input
|
|
28
|
+
"""
|
|
29
|
+
pricing = {
|
|
30
|
+
"gemini-2.0-flash": {
|
|
31
|
+
"prompt_short": 0.000_000_10,
|
|
32
|
+
"completion_short": 0.000_000_40,
|
|
33
|
+
"prompt_long": 0.000_000_10,
|
|
34
|
+
"completion_long": 0.000_000_40,
|
|
35
|
+
},
|
|
36
|
+
"gemini-2.0-flash-lite": {
|
|
37
|
+
"prompt_short": 0.000_000_075,
|
|
38
|
+
"completion_short": 0.000_000_30,
|
|
39
|
+
"prompt_long": 0.000_000_075,
|
|
40
|
+
"completion_long": 0.000_000_30,
|
|
41
|
+
},
|
|
42
|
+
"gemini-1.5-flash": {
|
|
43
|
+
"prompt_short": 0.000_000_075,
|
|
44
|
+
"completion_short": 0.000_000_30,
|
|
45
|
+
"prompt_long": 0.000_000_15,
|
|
46
|
+
"completion_long": 0.000_000_60,
|
|
47
|
+
},
|
|
48
|
+
"gemini-1.5-flash-8b": {
|
|
49
|
+
"prompt_short": 0.000_000_037_5,
|
|
50
|
+
"completion_short": 0.000_000_15,
|
|
51
|
+
"prompt_long": 0.000_000_075,
|
|
52
|
+
"completion_long": 0.000_000_30,
|
|
53
|
+
},
|
|
54
|
+
"gemini-1.5-pro": {
|
|
55
|
+
"prompt_short": 0.000_001_25,
|
|
56
|
+
"completion_short": 0.000_005,
|
|
57
|
+
"prompt_long": 0.000_002_5,
|
|
58
|
+
"completion_long": 0.000_01,
|
|
59
|
+
},
|
|
60
|
+
"gemini-1.0-pro": {
|
|
61
|
+
"prompt_short": 0.000_000_5,
|
|
62
|
+
"completion_short": 0.000_001_5,
|
|
63
|
+
"prompt_long": 0.000_000_5,
|
|
64
|
+
"completion_long": 0.000_001_5,
|
|
65
|
+
},
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
if input_tokens is None or output_tokens is None:
|
|
69
|
+
return None
|
|
70
|
+
|
|
71
|
+
try:
|
|
72
|
+
model_pricing = pricing[model]
|
|
73
|
+
except KeyError:
|
|
74
|
+
return None
|
|
75
|
+
|
|
76
|
+
# Determine if we're using long context pricing
|
|
77
|
+
use_long_context = input_tokens > 128_000
|
|
78
|
+
|
|
79
|
+
prompt_price = model_pricing["prompt_long" if use_long_context else "prompt_short"]
|
|
80
|
+
completion_price = model_pricing[
|
|
81
|
+
"completion_long" if use_long_context else "completion_short"
|
|
82
|
+
]
|
|
83
|
+
|
|
84
|
+
prompt_cost = input_tokens * prompt_price
|
|
85
|
+
completion_cost = output_tokens * completion_price
|
|
86
|
+
total_cost = prompt_cost + completion_cost
|
|
87
|
+
|
|
88
|
+
return total_cost
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
from typing import cast

from google.genai.types import (
    GenerationConfigDict,
)

from ...base.call_params import CommonCallParams
from ..call_params import GoogleCallParams

GOOGLE_PARAM_MAPPING = {
    "temperature": "temperature",
    "max_tokens": "max_output_tokens",
    "top_p": "top_p",
    "stop": "stop_sequences",
}


def convert_common_call_params(common_params: CommonCallParams) -> GoogleCallParams:
    """Convert CommonCallParams to Google parameters."""
    generation_config: dict = {}
    for name, value in common_params.items():
        target = GOOGLE_PARAM_MAPPING.get(name)
        if target is None or value is None:
            continue
        # A bare string stop sequence is wrapped into a single-element list.
        if name == "stop" and isinstance(value, str):
            generation_config[target] = [value]
        else:
            generation_config[target] = value

    if not generation_config:
        return cast(GoogleCallParams, {})

    return cast(
        GoogleCallParams,
        {"generation_config": cast(GenerationConfigDict, generation_config)},
    )
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from mirascope.core.base._utils._convert_provider_finish_reason_to_finish_reason import (
    FinishReasonMappingValue,
    _convert_finish_reasons_to_common_finish_reasons_from_mapping,
)
from mirascope.core.base.types import FinishReason

# Maps Google candidate finish reasons onto Mirascope's provider-agnostic
# values: "MAX_TOKENS" -> "length", "SAFETY" -> "content_filter", and every
# other reason is treated as a normal "stop".
_FinishReasonMapping: dict[str, FinishReasonMappingValue] = {
    "FINISH_REASON_UNSPECIFIED": "stop",
    "STOP": "stop",
    "MAX_TOKENS": "length",
    "SAFETY": "content_filter",
    "RECITATION": "stop",
    "OTHER": "stop",
    "BLOCKLIST": "stop",
    "PROHIBITED_CONTENT": "stop",
    "SPII": "stop",
    "MALFORMED_FUNCTION_CALL": "stop",
}


def _convert_finish_reasons_to_common_finish_reasons(
    finish_reasons: list[str],
) -> list[FinishReason] | None:
    """Provider-agnostic finish reasons."""
    # Delegates to the shared mapping-based converter with the Google table.
    return _convert_finish_reasons_to_common_finish_reasons_from_mapping(
        finish_reasons, _FinishReasonMapping
    )
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"""Utility for converting `BaseMessageParam` to `ContentsType`"""
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import io
|
|
5
|
+
|
|
6
|
+
import PIL.Image
|
|
7
|
+
from google.genai import Client
|
|
8
|
+
from google.genai.types import BlobDict, ContentDict, FileDataDict, PartDict
|
|
9
|
+
|
|
10
|
+
from ...base import BaseMessageParam
|
|
11
|
+
from ...base._utils import get_audio_type
|
|
12
|
+
from ...base._utils._parse_content_template import _load_media
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def convert_message_params(
|
|
16
|
+
message_params: list[BaseMessageParam | ContentDict], client: Client
|
|
17
|
+
) -> list[ContentDict]:
|
|
18
|
+
converted_message_params = []
|
|
19
|
+
for message_param in message_params:
|
|
20
|
+
if not isinstance(message_param, BaseMessageParam):
|
|
21
|
+
converted_message_params.append(message_param)
|
|
22
|
+
elif (role := message_param.role) == "system":
|
|
23
|
+
if not isinstance(message_param.content, str):
|
|
24
|
+
raise ValueError(
|
|
25
|
+
"System message content must be a single text string."
|
|
26
|
+
) # pragma: no cover
|
|
27
|
+
converted_message_params += [
|
|
28
|
+
{
|
|
29
|
+
"role": "system",
|
|
30
|
+
"parts": [PartDict(text=message_param.content)],
|
|
31
|
+
}
|
|
32
|
+
]
|
|
33
|
+
elif isinstance((content := message_param.content), str):
|
|
34
|
+
converted_message_params.append(
|
|
35
|
+
{
|
|
36
|
+
"role": role if role == "user" else "model",
|
|
37
|
+
"parts": [PartDict(text=content)],
|
|
38
|
+
}
|
|
39
|
+
)
|
|
40
|
+
else:
|
|
41
|
+
converted_content = []
|
|
42
|
+
for part in content:
|
|
43
|
+
if part.type == "text":
|
|
44
|
+
converted_content.append(PartDict(text=part.text))
|
|
45
|
+
elif part.type == "image":
|
|
46
|
+
if part.media_type not in [
|
|
47
|
+
"image/jpeg",
|
|
48
|
+
"image/png",
|
|
49
|
+
"image/webp",
|
|
50
|
+
"image/heic",
|
|
51
|
+
"image/heif",
|
|
52
|
+
]:
|
|
53
|
+
raise ValueError(
|
|
54
|
+
f"Unsupported image media type: {part.media_type}. "
|
|
55
|
+
"Google currently only supports JPEG, PNG, WebP, HEIC, "
|
|
56
|
+
"and HEIF images."
|
|
57
|
+
)
|
|
58
|
+
converted_content.append(
|
|
59
|
+
PartDict(
|
|
60
|
+
inline_data=BlobDict(
|
|
61
|
+
data=part.image, mime_type=part.media_type
|
|
62
|
+
)
|
|
63
|
+
)
|
|
64
|
+
)
|
|
65
|
+
elif part.type == "image_url":
|
|
66
|
+
if part.url.startswith(("https://", "http://")):
|
|
67
|
+
downloaded_image = io.BytesIO(_load_media(part.url))
|
|
68
|
+
image = PIL.Image.open(downloaded_image)
|
|
69
|
+
media_type = (
|
|
70
|
+
PIL.Image.MIME[image.format]
|
|
71
|
+
if image.format
|
|
72
|
+
else "image/unknown"
|
|
73
|
+
)
|
|
74
|
+
if media_type not in [
|
|
75
|
+
"image/jpeg",
|
|
76
|
+
"image/png",
|
|
77
|
+
"image/webp",
|
|
78
|
+
"image/heic",
|
|
79
|
+
"image/heif",
|
|
80
|
+
]:
|
|
81
|
+
raise ValueError(
|
|
82
|
+
f"Unsupported image media type: {media_type}. "
|
|
83
|
+
"Google currently only supports JPEG, PNG, WebP, HEIC, "
|
|
84
|
+
"and HEIF images."
|
|
85
|
+
)
|
|
86
|
+
if client.vertexai:
|
|
87
|
+
uri = part.url
|
|
88
|
+
else:
|
|
89
|
+
downloaded_image.seek(0)
|
|
90
|
+
file_ref = client.files.upload(
|
|
91
|
+
file=downloaded_image, config={"mime_type": media_type}
|
|
92
|
+
)
|
|
93
|
+
uri = file_ref.uri
|
|
94
|
+
media_type = file_ref.mime_type
|
|
95
|
+
|
|
96
|
+
converted_content.append(
|
|
97
|
+
PartDict(
|
|
98
|
+
file_data=FileDataDict(
|
|
99
|
+
file_uri=uri, mime_type=media_type
|
|
100
|
+
)
|
|
101
|
+
)
|
|
102
|
+
)
|
|
103
|
+
else:
|
|
104
|
+
media_type = "image/unknown"
|
|
105
|
+
uri = part.url
|
|
106
|
+
converted_content.append(
|
|
107
|
+
PartDict(
|
|
108
|
+
file_data=FileDataDict(
|
|
109
|
+
file_uri=uri, mime_type=media_type
|
|
110
|
+
)
|
|
111
|
+
)
|
|
112
|
+
)
|
|
113
|
+
elif part.type == "audio":
|
|
114
|
+
if part.media_type not in [
|
|
115
|
+
"audio/wav",
|
|
116
|
+
"audio/mp3",
|
|
117
|
+
"audio/aiff",
|
|
118
|
+
"audio/aac",
|
|
119
|
+
"audio/ogg",
|
|
120
|
+
"audio/flac",
|
|
121
|
+
]:
|
|
122
|
+
raise ValueError(
|
|
123
|
+
f"Unsupported audio media type: {part.media_type}. "
|
|
124
|
+
"Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
|
|
125
|
+
"and FLAC audio file types."
|
|
126
|
+
)
|
|
127
|
+
converted_content.append(
|
|
128
|
+
PartDict(
|
|
129
|
+
inline_data=BlobDict(
|
|
130
|
+
data=part.audio
|
|
131
|
+
if isinstance(part.audio, bytes)
|
|
132
|
+
else base64.b64decode(part.audio),
|
|
133
|
+
mime_type=part.media_type,
|
|
134
|
+
)
|
|
135
|
+
)
|
|
136
|
+
)
|
|
137
|
+
elif part.type == "audio_url":
|
|
138
|
+
if part.url.startswith(("https://", "http://")):
|
|
139
|
+
audio = _load_media(part.url)
|
|
140
|
+
audio_type = get_audio_type(audio)
|
|
141
|
+
if audio_type not in [
|
|
142
|
+
"audio/wav",
|
|
143
|
+
"audio/mp3",
|
|
144
|
+
"audio/aiff",
|
|
145
|
+
"audio/aac",
|
|
146
|
+
"audio/ogg",
|
|
147
|
+
"audio/flac",
|
|
148
|
+
]:
|
|
149
|
+
raise ValueError(
|
|
150
|
+
f"Unsupported audio media type: {audio_type}. "
|
|
151
|
+
"Google currently only supports WAV, MP3, AIFF, AAC, OGG, "
|
|
152
|
+
"and FLAC audio file types."
|
|
153
|
+
)
|
|
154
|
+
converted_content.append(
|
|
155
|
+
{"mime_type": audio_type, "data": audio}
|
|
156
|
+
)
|
|
157
|
+
else:
|
|
158
|
+
audio_type = "audio/unknown"
|
|
159
|
+
converted_content.append(
|
|
160
|
+
PartDict(
|
|
161
|
+
file_data=FileDataDict(
|
|
162
|
+
file_uri=part.url, mime_type=audio_type
|
|
163
|
+
)
|
|
164
|
+
)
|
|
165
|
+
)
|
|
166
|
+
else:
|
|
167
|
+
raise ValueError(
|
|
168
|
+
"Google currently only supports text, image, and audio parts. "
|
|
169
|
+
f"Part provided: {part.type}"
|
|
170
|
+
)
|
|
171
|
+
converted_message_params.append(
|
|
172
|
+
{
|
|
173
|
+
"role": role if role == "user" else "model",
|
|
174
|
+
"parts": converted_content,
|
|
175
|
+
}
|
|
176
|
+
)
|
|
177
|
+
return converted_message_params
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
"""Get JSON output from a Google response."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
|
|
5
|
+
from proto.marshal.collections import RepeatedComposite
|
|
6
|
+
|
|
7
|
+
from ..call_response import GoogleCallResponse
|
|
8
|
+
from ..call_response_chunk import GoogleCallResponseChunk
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def get_json_output(
|
|
12
|
+
response: GoogleCallResponse | GoogleCallResponseChunk, json_mode: bool
|
|
13
|
+
) -> str:
|
|
14
|
+
"""Extracts the JSON output from a Google response."""
|
|
15
|
+
if isinstance(response, GoogleCallResponse):
|
|
16
|
+
if json_mode and (content := response.content):
|
|
17
|
+
json_start = content.index("{")
|
|
18
|
+
json_end = content.rfind("}")
|
|
19
|
+
return content[json_start : json_end + 1]
|
|
20
|
+
elif tool_calls := [
|
|
21
|
+
function_call
|
|
22
|
+
for function_call in (response.response.function_calls or []) # pyright: ignore [reportOptionalSubscript, reportOptionalIterable, reportOptionalMemberAccess]
|
|
23
|
+
if function_call.args # pyright: ignore [reportOptionalSubscript, reportOptionalIterable, reportOptionalMemberAccess]
|
|
24
|
+
]:
|
|
25
|
+
return json.dumps(
|
|
26
|
+
{
|
|
27
|
+
k: v if not isinstance(v, RepeatedComposite) else list(v)
|
|
28
|
+
for k, v in (tool_calls[0].args or {}).items() # pyright: ignore [reportOptionalMemberAccess]
|
|
29
|
+
}
|
|
30
|
+
if isinstance(tool_calls, list) and tool_calls[0]
|
|
31
|
+
else {}
|
|
32
|
+
)
|
|
33
|
+
else:
|
|
34
|
+
raise ValueError("No tool call or JSON object found in response.")
|
|
35
|
+
elif not json_mode:
|
|
36
|
+
raise ValueError("Google only supports structured streaming in json mode.")
|
|
37
|
+
return response.content
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Handles streaming content and tools from the Google API."""
|
|
2
|
+
|
|
3
|
+
from collections.abc import AsyncGenerator, Generator
|
|
4
|
+
|
|
5
|
+
from google.genai.types import GenerateContentResponse
|
|
6
|
+
|
|
7
|
+
from ..call_response_chunk import GoogleCallResponseChunk
|
|
8
|
+
from ..tool import GoogleTool
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def handle_stream(
|
|
12
|
+
stream: Generator[GenerateContentResponse, None, None],
|
|
13
|
+
tool_types: list[type[GoogleTool]] | None = None,
|
|
14
|
+
partial_tools: bool = False,
|
|
15
|
+
) -> Generator[tuple[GoogleCallResponseChunk, None], None, None]:
|
|
16
|
+
"""Iterator over the stream and constructs tools as they are streamed.
|
|
17
|
+
|
|
18
|
+
Note: google does not currently support streaming tools.
|
|
19
|
+
"""
|
|
20
|
+
for chunk in stream:
|
|
21
|
+
yield GoogleCallResponseChunk(chunk=chunk), None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
async def handle_stream_async(
|
|
25
|
+
stream: AsyncGenerator[GenerateContentResponse, None],
|
|
26
|
+
tool_types: list[type[GoogleTool]] | None = None,
|
|
27
|
+
partial_tools: bool = False,
|
|
28
|
+
) -> AsyncGenerator[tuple[GoogleCallResponseChunk, None], None]:
|
|
29
|
+
"""
|
|
30
|
+
Async iterator over the stream and constructs tools as they are streamed.
|
|
31
|
+
|
|
32
|
+
Note: google does not currently support streaming tools.
|
|
33
|
+
"""
|
|
34
|
+
async for chunk in stream:
|
|
35
|
+
yield GoogleCallResponseChunk(chunk=chunk), None
|