langchain-google-genai 1.0.7__py3-none-any.whl → 1.0.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- langchain_google_genai/_function_utils.py +157 -168
- langchain_google_genai/chat_models.py +274 -65
- langchain_google_genai/embeddings.py +6 -6
- langchain_google_genai/llms.py +4 -4
- {langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/METADATA +2 -2
- {langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/RECORD +8 -8
- {langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/LICENSE +0 -0
- {langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/WHEEL +0 -0
langchain_google_genai/_function_utils.py
CHANGED

@@ -1,5 +1,8 @@
 from __future__ import annotations
 
+import collections
+import json
+import logging
 from typing import (
     Any,
     Callable,
@@ -16,14 +19,21 @@ from typing import (
 )
 
 import google.ai.generativelanguage as glm
-
-
+import google.ai.generativelanguage_v1beta.types as gapic
+import proto  # type: ignore[import]
 from google.generativeai.types.content_types import ToolDict  # type: ignore[import]
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.tools import BaseTool
 from langchain_core.tools import tool as callable_as_lc_tool
+from langchain_core.utils.function_calling import (
+    FunctionDescription,
+    convert_to_openai_tool,
+)
 from langchain_core.utils.json_schema import dereference_refs
 
+logger = logging.getLogger(__name__)
+
+
 TYPE_ENUM = {
     "string": glm.Type.STRING,
     "number": glm.Type.NUMBER,
@@ -34,6 +44,17 @@ TYPE_ENUM = {
 }
 
 TYPE_ENUM_REVERSE = {v: k for k, v in TYPE_ENUM.items()}
+_ALLOWED_SCHEMA_FIELDS = []
+_ALLOWED_SCHEMA_FIELDS.extend([f.name for f in gapic.Schema()._pb.DESCRIPTOR.fields])
+_ALLOWED_SCHEMA_FIELDS.extend(
+    [
+        f
+        for f in gapic.Schema.to_dict(
+            gapic.Schema(), preserving_proto_field_name=False
+        ).keys()
+    ]
+)
+_ALLOWED_SCHEMA_FIELDS_SET = set(_ALLOWED_SCHEMA_FIELDS)
 
 
 class _ToolDictLike(TypedDict):
@@ -52,7 +73,7 @@ class _ToolDict(TypedDict):
 
 # Info: This is a FunctionDeclaration(=fc).
 _FunctionDeclarationLike = Union[
-    BaseTool, Type[BaseModel], FunctionDeclaration, Callable, Dict[str, Any]
+    BaseTool, Type[BaseModel], gapic.FunctionDeclaration, Callable, Dict[str, Any]
 ]
 
 # Info: This mean one tool.
@@ -60,10 +81,10 @@ _FunctionDeclarationLikeList = Sequence[_FunctionDeclarationLike]
 
 
 # Info: This means one tool=Sequence of FunctionDeclaration
-# The dict should be
+# The dict should be gapic.Tool like. {"function_declarations": [ { "name": ...}.
 # OpenAI like dict is not be accepted. {{'type': 'function', 'function': {'name': ...}
 _ToolsType = Union[
-
+    gapic.Tool,
     ToolDict,
     _ToolDictLike,
     _FunctionDeclarationLikeList,
@@ -71,183 +92,151 @@ _ToolsType = Union[
 ]
 
 
-
-
+def _format_json_schema_to_gapic(schema: Dict[str, Any]) -> Dict[str, Any]:
+    converted_schema: Dict[str, Any] = {}
+    for key, value in schema.items():
+        if key == "definitions":
+            continue
+        elif key == "items":
+            converted_schema["items"] = _format_json_schema_to_gapic(value)
+        elif key == "properties":
+            if "properties" not in converted_schema:
+                converted_schema["properties"] = {}
+            for pkey, pvalue in value.items():
+                converted_schema["properties"][pkey] = _format_json_schema_to_gapic(
+                    pvalue
+                )
+            continue
+        elif key in ["type", "_type"]:
+            converted_schema["type"] = str(value).upper()
+        elif key not in _ALLOWED_SCHEMA_FIELDS_SET:
+            logger.warning(f"Key '{key}' is not supported in schema, ignoring")
+        else:
+            converted_schema[key] = value
+    return converted_schema
+
+
+def _dict_to_gapic_schema(schema: Dict[str, Any]) -> gapic.Schema:
+    dereferenced_schema = dereference_refs(schema)
+    formatted_schema = _format_json_schema_to_gapic(dereferenced_schema)
+    json_schema = json.dumps(formatted_schema)
+    return gapic.Schema.from_json(json_schema)
+
+
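The two helpers above replace the previous hand-rolled proto construction: JSON-schema keys that the gapic Schema message does not know are now dropped with a warning instead of reaching the proto constructor. A minimal sketch of the filtering, assuming langchain-google-genai 1.0.8 is installed (these are private helpers, imported here only for demonstration; the printed result is illustrative):

    from langchain_google_genai._function_utils import _format_json_schema_to_gapic

    schema = {
        "type": "object",  # lower-case JSON-schema type is normalized to "OBJECT"
        "title": "GetWeather",  # "title" is not a gapic.Schema field, so it is dropped with a warning
        "properties": {"location": {"type": "string", "description": "A city name"}},
    }
    print(_format_json_schema_to_gapic(schema))
    # roughly: {'type': 'OBJECT', 'properties': {'location': {'type': 'STRING', 'description': 'A city name'}}}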
+def _format_dict_to_function_declaration(
+    tool: Union[FunctionDescription, Dict[str, Any]],
+) -> gapic.FunctionDeclaration:
+    return gapic.FunctionDeclaration(
+        name=tool.get("name"),
+        description=tool.get("description"),
+        parameters=_dict_to_gapic_schema(tool.get("parameters", {})),
+    )
+
+
+# Info: gapic.Tool means function_declarations and proto.Message.
 def convert_to_genai_function_declarations(
-
-) ->
-    if isinstance(
-
-
-
-    )
-    elif isinstance(tool, (BaseTool, FunctionDeclaration)):
-        # single _FunctionDeclarationLike
-        return GoogleTool(
-            function_declarations=[_convert_fc_like_to_genai_function(tool)]
-        )
-    elif isinstance(tool, type) and issubclass(tool, BaseModel):
-        # single _FunctionDeclarationLike
-        return GoogleTool(
-            function_declarations=[_convert_fc_like_to_genai_function(tool)]
-        )
-    elif isinstance(tool, GoogleTool):
-        return cast(GoogleTool, tool)
-    elif callable(tool):
-        return GoogleTool(
-            function_declarations=[
-                _convert_tool_to_genai_function(callable_as_lc_tool()(tool))
-            ]
+    tools: Sequence[_ToolsType],
+) -> gapic.Tool:
+    if not isinstance(tools, collections.abc.Sequence):
+        logger.warning(
+            "convert_to_genai_function_declarations expects a Sequence "
+            "and not a single tool."
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        name=name, description=description, parameters=parameters
-    )
-    function_declarations.append(function_declaration)
-    return {"function_declarations": function_declarations}
-
-
-def _convert_fc_likes_to_genai_function(
-    fc_likes: _FunctionDeclarationLikeList,
-) -> Sequence[FunctionDeclaration]:
-    if isinstance(fc_likes, list):
-        return [_convert_fc_like_to_genai_function(fc) for fc in fc_likes]
-    raise ValueError(f"Unsupported fc_likes type {fc_likes}")
-
-
-def _convert_fc_like_to_genai_function(
-    fc_like: _FunctionDeclarationLike,
-) -> FunctionDeclaration:
-    if isinstance(fc_like, BaseTool):
-        return _convert_tool_to_genai_function(fc_like)
-    elif isinstance(fc_like, type) and issubclass(fc_like, BaseModel):
-        return _convert_pydantic_to_genai_function(fc_like)
-    elif isinstance(fc_like, dict):
-        # TODO: add declaration_index
-        return _convert_dict_to_genai_function(fc_like)
-    elif callable(fc_like):
-        return _convert_tool_to_genai_function(callable_as_lc_tool()(fc_like))
-    else:
-        raise ValueError(f"Unsupported fc_like type {fc_like}")
-
+        tools = [tools]
+    gapic_tool = gapic.Tool()
+    for tool in tools:
+        if isinstance(tool, gapic.Tool):
+            gapic_tool.function_declarations.extend(tool.function_declarations)
+        elif isinstance(tool, dict):
+            if "function_declarations" not in tool:
+                fd = _format_to_gapic_function_declaration(tool)
+                gapic_tool.function_declarations.append(fd)
+                continue
+            tool = cast(_ToolDictLike, tool)
+            function_declarations = tool["function_declarations"]
+            if not isinstance(function_declarations, collections.abc.Sequence):
+                raise ValueError(
+                    "function_declarations should be a list"
+                    f"got '{type(function_declarations)}'"
+                )
+            if function_declarations:
+                fds = [
+                    _format_to_gapic_function_declaration(fd)
+                    for fd in function_declarations
+                ]
+                gapic_tool.function_declarations.extend(fds)
+        else:
+            fd = _format_to_gapic_function_declaration(tool)
+            gapic_tool.function_declarations.append(fd)
+    return gapic_tool
 
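The rewritten converter accepts the whole tool list at once and merges everything into a single gapic.Tool with many function declarations, instead of producing one Tool per input. A minimal usage sketch, assuming 1.0.8 is installed:

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_google_genai._function_utils import (
        convert_to_genai_function_declarations,
    )


    class GetWeather(BaseModel):
        """Get the current weather in a given location"""

        location: str = Field(..., description="The city and state")


    def get_population(location: str) -> str:
        """Get the current population in a given location"""
        return "unknown"


    # Pydantic models, callables, dicts, and gapic.Tools can be mixed in one
    # Sequence; the result is a single gapic.Tool aggregating every declaration.
    gapic_tool = convert_to_genai_function_declarations([GetWeather, get_population])
    print([fd.name for fd in gapic_tool.function_declarations])
    # ['GetWeather', 'get_population']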
-def _convert_tool_dict_to_genai_functions(
-    tool_dict: _ToolDictLike,
-) -> Sequence[FunctionDeclaration]:
-    if "function_declarations" in tool_dict:
-        return _convert_dicts_to_genai_functions(tool_dict["function_declarations"])  # type: ignore
-    else:
-        raise ValueError(f"Unsupported function tool_dict type {tool_dict}")
-
-
-def _convert_dict_to_genai_functions(
-    function_declarations_dict: Dict[str, Any],
-) -> Sequence[FunctionDeclaration]:
-    if "function_declarations" in function_declarations_dict:
-        # GoogleTool like
-        return [
-            _convert_dict_to_genai_function(fc, i)
-            for i, fc in enumerate(function_declarations_dict["function_declarations"])
-        ]
-    d = function_declarations_dict
-    if "name" in d and "description" in d and "parameters" in d:
-        # _FunctionDeclarationDict
-        return [_convert_dict_to_genai_function(d)]
-    else:
-        # OpenAI like?
-        raise ValueError(f"Unsupported function call type {function_declarations_dict}")
 
+def tool_to_dict(tool: gapic.Tool) -> _ToolDict:
+    def _traverse_values(raw: Any) -> Any:
+        if isinstance(raw, list):
+            return [_traverse_values(v) for v in raw]
+        if isinstance(raw, dict):
+            return {k: _traverse_values(v) for k, v in raw.items()}
+        if isinstance(raw, proto.Message):
+            return _traverse_values(type(raw).to_dict(raw))
+        return raw
 
-
-    function_declaration_dicts: Sequence[Dict[str, Any]],
-) -> Sequence[FunctionDeclaration]:
-    return [
-        _convert_dict_to_genai_function(function_declaration_dict, i)
-        for i, function_declaration_dict in enumerate(function_declaration_dicts)
-    ]
+    return _traverse_values(type(tool).to_dict(tool))
 
 
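tool_to_dict recursively unwraps the proto message into plain Python values, which is what bind_tools now stores for easier serialization. Continuing the sketch from above:

    from langchain_google_genai._function_utils import tool_to_dict

    tool_dict = tool_to_dict(gapic_tool)
    print(tool_dict["function_declarations"][0]["name"])  # 'GetWeather'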
-def
-
-) -> FunctionDeclaration:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        },
-        "required": function_declaration_dict.get("parameters", []).get(
-            "required", []
-        ),
-        "type_": TYPE_ENUM[function_declaration_dict["parameters"]["type"]],
-    }
-    return FunctionDeclaration(**formatted_fc)
+def _format_to_gapic_function_declaration(
+    tool: _FunctionDeclarationLike,
+) -> gapic.FunctionDeclaration:
+    if isinstance(tool, BaseTool):
+        return _format_base_tool_to_function_declaration(tool)
+    elif isinstance(tool, type) and issubclass(tool, BaseModel):
+        return _convert_pydantic_to_genai_function(tool)
+    elif isinstance(tool, dict):
+        if all(k in tool for k in ("name", "description")) and "parameters" not in tool:
+            function = cast(dict, tool)
+            function["parameters"] = {}
+        else:
+            function = convert_to_openai_tool(cast(dict, tool))["function"]
+        return _format_dict_to_function_declaration(cast(FunctionDescription, function))
+    elif callable(tool):
+        return _format_base_tool_to_function_declaration(callable_as_lc_tool()(tool))
+    raise ValueError(f"Unsupported tool type {tool}")
 
 
-def
-
-
-
-
-            fc, tool_name=tool.name, tool_description=tool.description
-        )
-        raise ValueError(f"Unsupported function call type {fc}")
-    else:
-        return FunctionDeclaration(
+def _format_base_tool_to_function_declaration(
+    tool: BaseTool,
+) -> gapic.FunctionDeclaration:
+    if not tool.args_schema:
+        return gapic.FunctionDeclaration(
             name=tool.name,
             description=tool.description,
-            parameters=
-
-
+            parameters=gapic.Schema(
+                type=gapic.Type.OBJECT,
+                properties={
+                    "__arg1": gapic.Schema(type=gapic.Type.STRING),
                 },
-
-
-    },
+                required=["__arg1"],
+            ),
         )
 
+    schema = tool.args_schema.schema()
+    parameters = _dict_to_gapic_schema(schema)
+
+    return gapic.FunctionDeclaration(
+        name=tool.name or schema.get("title"),
+        description=tool.description or schema.get("description"),
+        parameters=parameters,
+    )
+
 
 def _convert_pydantic_to_genai_function(
     pydantic_model: Type[BaseModel],
     tool_name: Optional[str] = None,
     tool_description: Optional[str] = None,
-) -> FunctionDeclaration:
+) -> gapic.FunctionDeclaration:
     schema = dereference_refs(pydantic_model.schema())
     schema.pop("definitions", None)
-    function_declaration = FunctionDeclaration(
+    function_declaration = gapic.FunctionDeclaration(
         name=tool_name if tool_name else schema.get("title"),
         description=tool_description if tool_description else schema.get("description"),
         parameters={
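For a BaseTool without an args_schema, the declaration falls back to a single required string parameter named __arg1, matching how LangChain invokes single-input tools. A minimal sketch, assuming 1.0.8 is installed:

    from langchain_core.tools import Tool
    from langchain_google_genai._function_utils import (
        _format_base_tool_to_function_declaration,
    )

    # A single-input tool with no explicit args_schema.
    echo = Tool(name="echo", description="Echo the input", func=lambda s: s)
    fd = _format_base_tool_to_function_declaration(echo)
    print(fd.parameters.required)  # ['__arg1']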
@@ -290,7 +279,7 @@ _ToolChoiceType = Union[
 
 
 class _FunctionCallingConfigDict(TypedDict):
-    mode: Union[FunctionCallingConfig.Mode, str]
+    mode: Union[gapic.FunctionCallingConfig.Mode, str]
     allowed_function_names: Optional[List[str]]
 
 
@@ -304,17 +293,17 @@ def _tool_choice_to_tool_config(
 ) -> _ToolConfigDict:
     allowed_function_names: Optional[List[str]] = None
     if tool_choice is True or tool_choice == "any":
-        mode = "
+        mode = "ANY"
         allowed_function_names = all_names
     elif tool_choice == "auto":
-        mode = "
+        mode = "AUTO"
     elif tool_choice == "none":
-        mode = "
+        mode = "NONE"
     elif isinstance(tool_choice, str):
-        mode = "
+        mode = "ANY"
         allowed_function_names = [tool_choice]
     elif isinstance(tool_choice, list):
-        mode = "
+        mode = "ANY"
        allowed_function_names = tool_choice
     elif isinstance(tool_choice, dict):
         if "mode" in tool_choice:
@@ -334,7 +323,7 @@ def _tool_choice_to_tool_config(
         raise ValueError(f"Unrecognized tool choice format:\n\n{tool_choice=}")
     return _ToolConfigDict(
         function_calling_config={
-            "mode": mode,
+            "mode": mode.upper(),
             "allowed_function_names": allowed_function_names,
         }
     )
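With the gapic enums the mode strings are upper-case, and mode.upper() also normalizes any lower-case mode supplied through a dict. A quick sketch of the mapping, assuming 1.0.8 is installed (this is a private helper, imported only for demonstration):

    from langchain_google_genai._function_utils import _tool_choice_to_tool_config

    all_names = ["GetWeather", "GetPopulation"]
    print(_tool_choice_to_tool_config("GetWeather", all_names))
    # {'function_calling_config': {'mode': 'ANY', 'allowed_function_names': ['GetWeather']}}
    print(_tool_choice_to_tool_config("auto", all_names))
    # {'function_calling_config': {'mode': 'AUTO', 'allowed_function_names': None}}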
langchain_google_genai/chat_models.py
CHANGED

@@ -60,13 +60,11 @@ from langchain_core.messages import (
     BaseMessage,
     FunctionMessage,
     HumanMessage,
-    InvalidToolCall,
     SystemMessage,
-    ToolCall,
-    ToolCallChunk,
     ToolMessage,
 )
 from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.tool import invalid_tool_call, tool_call, tool_call_chunk
 from langchain_core.output_parsers.base import OutputParserLike
 from langchain_core.output_parsers.openai_tools import (
     JsonOutputToolsParser,
@@ -140,7 +138,7 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
     multiplier = 2
     min_seconds = 1
     max_seconds = 60
-    max_retries =
+    max_retries = 2
 
     return retry(
         reraise=True,
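The retry budget is now pinned at two attempts with exponential backoff between one and sixty seconds. A roughly equivalent standalone tenacity configuration, as a sketch (the real decorator additionally filters on Google API exception types):

    from tenacity import retry, stop_after_attempt, wait_exponential


    @retry(
        reraise=True,
        stop=stop_after_attempt(2),  # max_retries = 2
        wait=wait_exponential(multiplier=2, min=1, max=60),
    )
    def generate_with_retry() -> str:
        # call the Gemini API here; transient failures are retried
        return "ok"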
@@ -466,9 +464,6 @@ def _parse_response_candidate(
             raise Exception("Unexpected content type")
 
         if part.function_call:
-            # TODO: support multiple function calls
-            if "function_call" in additional_kwargs:
-                raise Exception("Multiple function calls are not currently supported")
             function_call = {"name": part.function_call.name}
             # dump to match other function calling llm for now
             function_call_args_dict = proto.Message.to_dict(part.function_call)["args"]
@@ -479,7 +474,7 @@ def _parse_response_candidate(
 
             if streaming:
                 tool_call_chunks.append(
-
+                    tool_call_chunk(
                         name=function_call.get("name"),
                         args=function_call.get("arguments"),
                         id=function_call.get("id", str(uuid.uuid4())),
@@ -488,27 +483,27 @@ def _parse_response_candidate(
                 )
             else:
                 try:
-
+                    tool_call_dict = parse_tool_calls(
                         [{"function": function_call}],
                         return_id=False,
-                    )
-                    tool_calls = [
-                        ToolCall(
-                            name=tool_call["name"],
-                            args=tool_call["args"],
-                            id=tool_call.get("id", str(uuid.uuid4())),
-                        )
-                        for tool_call in tool_calls_dicts
-                    ]
+                    )[0]
                 except Exception as e:
-                    invalid_tool_calls
-
+                    invalid_tool_calls.append(
+                        invalid_tool_call(
                             name=function_call.get("name"),
                             args=function_call.get("arguments"),
                             id=function_call.get("id", str(uuid.uuid4())),
                             error=str(e),
                         )
-
+                    )
+                else:
+                    tool_calls.append(
+                        tool_call(
+                            name=tool_call_dict["name"],
+                            args=tool_call_dict["args"],
+                            id=tool_call_dict.get("id", str(uuid.uuid4())),
+                        )
+                    )
         if content is None:
             content = ""
 
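Together with the removed guard above, every function_call part in a candidate is now parsed, so one response can carry several tool calls. Parsing delegates to langchain-core's parse_tool_calls; a self-contained sketch of that step:

    from langchain_core.output_parsers.openai_tools import parse_tool_calls

    function_call = {"name": "GetWeather", "arguments": '{"location": "Boston, MA"}'}
    parsed = parse_tool_calls([{"function": function_call}], return_id=False)[0]
    print(parsed)  # {'name': 'GetWeather', 'args': {'location': 'Boston, MA'}}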
@@ -594,33 +589,215 @@ def _is_event_loop_running() -> bool:
 
 
 class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
-    """`Google
+    """`Google AI` chat models integration.
 
-
+    Instantiation:
+        To use, you must have either:
 
-
-
-
+            1. The ``GOOGLE_API_KEY``` environment variable set with your API key, or
+            2. Pass your API key using the google_api_key kwarg to the ChatGoogle
+               constructor.
 
-    Example:
         .. code-block:: python
 
             from langchain_google_genai import ChatGoogleGenerativeAI
-            chat = ChatGoogleGenerativeAI(model="gemini-pro")
-            chat.invoke("Write me a ballad about LangChain")
 
-
+            llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
+            llm.invoke("Write me a ballad about LangChain")
+
+    Invoke:
+        .. code-block:: python
+
+            messages = [
+                ("system", "Translate the user sentence to French."),
+                ("human", "I love programming."),
+            ]
+            llm.invoke(messages)
+
+        .. code-block:: python
+
+            AIMessage(
+                content="J'adore programmer. \\n",
+                response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]},
+                id='run-56cecc34-2e54-4b52-a974-337e47008ad2-0',
+                usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23}
+            )
+
+    Stream:
+        .. code-block:: python
+
+            for chunk in llm.stream(messages):
+                print(chunk)
+
+        .. code-block:: python
+
+            AIMessageChunk(content='J', response_metadata={'finish_reason': 'STOP', 'safety_ratings': []}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 1, 'total_tokens': 19})
+            AIMessageChunk(content="'adore programmer. \n", response_metadata={'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23})
+
+        .. code-block:: python
+
+            stream = llm.stream(messages)
+            full = next(stream)
+            for chunk in stream:
+                full += chunk
+            full
+
+        .. code-block:: python
+
+            AIMessageChunk(
+                content="J'adore programmer. \\n",
+                response_metadata={'finish_reason': 'STOPSTOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]},
+                id='run-3ce13a42-cd30-4ad7-a684-f1f0b37cdeec',
+                usage_metadata={'input_tokens': 36, 'output_tokens': 6, 'total_tokens': 42}
+            )
+
+    Async:
+        .. code-block:: python
+
+            await llm.ainvoke(messages)
+
+            # stream:
+            # async for chunk in (await llm.astream(messages))
+
+            # batch:
+            # await llm.abatch([messages])
+
+    Tool calling:
+        .. code-block:: python
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+
+            class GetWeather(BaseModel):
+                '''Get the current weather in a given location'''
+
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )
+
+
+            class GetPopulation(BaseModel):
+                '''Get the current population in a given location'''
+
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )
+
+
+            llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
+            ai_msg = llm_with_tools.invoke(
+                "Which city is hotter today and which is bigger: LA or NY?"
+            )
+            ai_msg.tool_calls
+
+        .. code-block:: python
+
+            [{'name': 'GetWeather',
+              'args': {'location': 'Los Angeles, CA'},
+              'id': 'c186c99f-f137-4d52-947f-9e3deabba6f6'},
+             {'name': 'GetWeather',
+              'args': {'location': 'New York City, NY'},
+              'id': 'cebd4a5d-e800-4fa5-babd-4aa286af4f31'},
+             {'name': 'GetPopulation',
+              'args': {'location': 'Los Angeles, CA'},
+              'id': '4f92d897-f5e4-4d34-a3bc-93062c92591e'},
+             {'name': 'GetPopulation',
+              'args': {'location': 'New York City, NY'},
+              'id': '634582de-5186-4e4b-968b-f192f0a93678'}]
+
+    Structured output:
+        .. code-block:: python
+
+            from typing import Optional
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+
+            class Joke(BaseModel):
+                '''Joke to tell user.'''
+
+                setup: str = Field(description="The setup of the joke")
+                punchline: str = Field(description="The punchline to the joke")
+                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
+
+
+            structured_llm = llm.with_structured_output(Joke)
+            structured_llm.invoke("Tell me a joke about cats")
 
-
-
+        .. code-block:: python
+
+            Joke(
+                setup='Why are cats so good at video games?',
+                punchline='They have nine lives on the internet',
+                rating=None
+            )
+
+    Image input:
+        .. code-block:: python
+
+            import base64
+            import httpx
+            from langchain_core.messages import HumanMessage
+
+            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+            image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
+            message = HumanMessage(
+                content=[
+                    {"type": "text", "text": "describe the weather in this image"},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
+                    },
+                ]
+            )
+            ai_msg = llm.invoke([message])
+            ai_msg.content
+
+        .. code-block:: python
+
+            'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms. \n'
+
+    Token usage:
+        .. code-block:: python
+
+            ai_msg = llm.invoke(messages)
+            ai_msg.usage_metadata
+
+        .. code-block:: python
+
+            {'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23}
+
+
+    Response metadata
+        .. code-block:: python
+
+            ai_msg = llm.invoke(messages)
+            ai_msg.response_metadata
+
+        .. code-block:: python
+
+            {
+                'prompt_feedback': {'block_reason': 0, 'safety_ratings': []},
+                'finish_reason': 'STOP',
+                'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]
+            }
+
+    """  # noqa: E501
+
+    client: Any = None  #: :meta private:
+    async_client: Any = None  #: :meta private:
+    google_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
+    """Google AI API key.
+
+    If not specified will be read from env var ``GOOGLE_API_KEY``."""
     default_metadata: Sequence[Tuple[str, str]] = Field(
         default_factory=list
     )  #: :meta private:
 
     convert_system_message_to_human: bool = False
     """Whether to merge any leading SystemMessage into the following HumanMessage.
-
-    Gemini does not support system messages; any unsupported messages will
+
+    Gemini does not support system messages; any unsupported messages will
     raise an error."""
 
     class Config:
@@ -786,9 +963,18 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         **kwargs: Any,
     ) -> ChatResult:
         if not self.async_client:
-
-
-
+            updated_kwargs = {
+                **kwargs,
+                **{
+                    "tools": tools,
+                    "functions": functions,
+                    "safety_settings": safety_settings,
+                    "tool_config": tool_config,
+                    "generation_config": generation_config,
+                },
+            }
+            return await super()._agenerate(
+                messages, stop, run_manager, **updated_kwargs
             )
 
         request = self._prepare_request(
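When no separate async client is configured, _agenerate now falls back to the sync implementation while still forwarding tools, functions, safety_settings, tool_config, and generation_config; the _astream hunk below gets the same treatment. Calling code is unchanged. A minimal sketch, assuming GOOGLE_API_KEY is set:

    import asyncio

    from langchain_google_genai import ChatGoogleGenerativeAI


    async def main() -> None:
        llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
        # tools / safety_settings / tool_config passed to ainvoke now survive
        # the fallback path instead of being dropped.
        print(await llm.ainvoke("Say hello in French"))


    asyncio.run(main())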
@@ -857,27 +1043,43 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         generation_config: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if not self.async_client:
+            updated_kwargs = {
+                **kwargs,
+                **{
+                    "tools": tools,
+                    "functions": functions,
+                    "safety_settings": safety_settings,
+                    "tool_config": tool_config,
+                    "generation_config": generation_config,
+                },
+            }
+            async for value in super()._astream(
+                messages, stop, run_manager, **updated_kwargs
+            ):
+                yield value
+        else:
+            request = self._prepare_request(
+                messages,
+                stop=stop,
+                tools=tools,
+                functions=functions,
+                safety_settings=safety_settings,
+                tool_config=tool_config,
+                generation_config=generation_config,
+            )
+            async for chunk in await _achat_with_retry(
+                request=request,
+                generation_method=self.async_client.stream_generate_content,
+                **kwargs,
+                metadata=self.default_metadata,
+            ):
+                _chat_result = _response_to_result(chunk, stream=True)
+                gen = cast(ChatGenerationChunk, _chat_result.generations[0])
 
-
-
-
+                if run_manager:
+                    await run_manager.on_llm_new_token(gen.text)
+                yield gen
 
     def _prepare_request(
         self,
@@ -892,9 +1094,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> Tuple[GenerateContentRequest, Dict[str, Any]]:
         formatted_tools = None
         if tools:
-            formatted_tools = [
-                convert_to_genai_function_declarations(tool) for tool in tools
-            ]
+            formatted_tools = [convert_to_genai_function_declarations(tools)]
         elif functions:
             formatted_tools = [convert_to_genai_function_declarations(functions)]
 
@@ -959,7 +1159,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             )
         else:
             parser = JsonOutputToolsParser()
-
+        tool_choice = _get_tool_name(schema) if self._supports_tool_choice else None
+        llm = self.bind_tools([schema], tool_choice=tool_choice)
         if include_raw:
             parser_with_fallback = RunnablePassthrough.assign(
                 parsed=itemgetter("raw") | parser, parsing_error=lambda _: None
@@ -997,9 +1198,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 f"both:\n\n{tool_choice=}\n\n{tool_config=}"
             )
         # Bind dicts for easier serialization/deserialization.
-        genai_tools = [
-            tool_to_dict(convert_to_genai_function_declarations(tool)) for tool in tools
-        ]
+        genai_tools = [tool_to_dict(convert_to_genai_function_declarations(tools))]
         if tool_choice:
             all_names = [
                 f["name"]  # type: ignore[index]
@@ -1007,5 +1206,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 for f in t["function_declarations"]
             ]
             tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
-
         return self.bind(tools=genai_tools, tool_config=tool_config, **kwargs)
+
+    @property
+    def _supports_tool_choice(self) -> bool:
+        return "gemini-1.5-pro" in self.model
+
+
+def _get_tool_name(
+    tool: Union[ToolDict, GoogleTool],
+) -> str:
+    genai_tool = tool_to_dict(convert_to_genai_function_declarations([tool]))
+    return [f["name"] for f in genai_tool["function_declarations"]][0]  # type: ignore[index]
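with_structured_output now forces the function call via tool_choice on models that support it, which per _supports_tool_choice is currently only gemini-1.5-pro. A minimal sketch, assuming GOOGLE_API_KEY is set:

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_google_genai import ChatGoogleGenerativeAI


    class Joke(BaseModel):
        """Joke to tell user."""

        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")


    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
    # On gemini-1.5-pro this binds tool_config mode ANY ("must call Joke");
    # on other models tool_choice stays None and the call may return prose.
    structured_llm = llm.with_structured_output(Joke)
    print(structured_llm.invoke("Tell me a joke about cats"))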
langchain_google_genai/embeddings.py
CHANGED

@@ -39,20 +39,20 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         embeddings.embed_query("What's our Q1 revenue?")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: str = Field(
         ...,
         description="The name of the embedding model to use. "
         "Example: models/embedding-001",
     )
     task_type: Optional[str] = Field(
-        None,
+        default=None,
         description="The task type. Valid options include: "
         "task_type_unspecified, retrieval_query, retrieval_document, "
         "semantic_similarity, classification, and clustering",
     )
     google_api_key: Optional[SecretStr] = Field(
-        None,
+        default=None,
         description="The Google API key to use. If not provided, "
         "the GOOGLE_API_KEY environment variable will be used.",
     )
@@ -64,18 +64,18 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         "provided, credentials will be ascertained from the GOOGLE_API_KEY envvar",
     )
     client_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description=(
             "A dictionary of client options to pass to the Google API client, "
             "such as `api_endpoint`."
         ),
     )
     transport: Optional[str] = Field(
-        None,
+        default=None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
     request_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description="A dictionary of request options to pass to the Google API client."
         "Example: `{'timeout': 10}`",
     )
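The remaining changes in embeddings.py, and in llms.py below, are style-only: in pydantic v1, Field(None, ...) and Field(default=None, ...) declare the same default, and giving client an explicit = None default keeps validation from requiring it before validate_environment populates it. A minimal equivalence sketch using langchain-core's pydantic_v1 shim:

    from typing import Optional

    from langchain_core.pydantic_v1 import BaseModel, Field


    class Example(BaseModel):
        positional: Optional[str] = Field(None, description="positional default")
        keyword: Optional[str] = Field(default=None, description="keyword default")


    e = Example()
    assert e.positional is None and e.keyword is None  # identical behavior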
langchain_google_genai/llms.py
CHANGED

@@ -149,18 +149,18 @@ Supported examples:
     """The maximum number of seconds to wait for a response."""
 
     client_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description=(
             "A dictionary of client options to pass to the Google API client, "
             "such as `api_endpoint`."
         ),
     )
     transport: Optional[str] = Field(
-        None,
+        default=None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
     additional_headers: Optional[Dict[str, str]] = Field(
-        None,
+        default=None,
         description=(
             "A key-value dictionary representing additional headers for the model call"
         ),

@@ -212,7 +212,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         llm = GoogleGenerativeAI(model="gemini-pro")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
{langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 1.0.7
+Version: 1.0.8
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT

@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: images
 Requires-Dist: google-generativeai (>=0.7.0,<0.8.0)
-Requires-Dist: langchain-core (>=0.2.
+Requires-Dist: langchain-core (>=0.2.17,<0.3)
 Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
{langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/RECORD
CHANGED

@@ -1,16 +1,16 @@
 langchain_google_genai/__init__.py,sha256=Oji-S2KYWrku1wyQEskY84IOfY8MfRhujjJ4d7hbsk4,2758
 langchain_google_genai/_common.py,sha256=ASlwE8hEbvOm55BVF_D4rf2nl7RYsnpsi5xbM6DW3Cc,1576
 langchain_google_genai/_enums.py,sha256=KLPmxS1K83K4HjBIXFaXoL_sFEOv8Hq-2B2PDMKyDgo,197
-langchain_google_genai/_function_utils.py,sha256
+langchain_google_genai/_function_utils.py,sha256=iVBrkTeWNW9UF0L2DhmTDTVSjYecqMEM4dzRj1LqyII,11074
 langchain_google_genai/_genai_extension.py,sha256=ZwNwLV22RSf9LB7FOCLsoHzLlQDF-EQmRNYM1an2uSw,20769
 langchain_google_genai/_image_utils.py,sha256=-0XgCMdYkvrIktFvUpy-2GPbFgfSVKZICawB2hiJzus,4999
-langchain_google_genai/chat_models.py,sha256=
-langchain_google_genai/embeddings.py,sha256=
+langchain_google_genai/chat_models.py,sha256=0RHzSbtLSJ8P7fqTVGsxv8uuVfvZPmqWdjiyHIb-Vwo,45570
+langchain_google_genai/embeddings.py,sha256=FWouioVYXV4Hpikl7C-EkzL9DwUzqa_bQkfJxRY7JZg,10089
 langchain_google_genai/genai_aqa.py,sha256=zcC5cdFYtqLK7DGPhYGvWNeHHeU-CQKA9KhewmsA5lw,4303
 langchain_google_genai/google_vector_store.py,sha256=PPIk-4FmD5UUdmYA2u7VcEhGsiztvRVN59QoGLXdfoA,16139
-langchain_google_genai/llms.py,sha256=
+langchain_google_genai/llms.py,sha256=W8tvdKVSfVTz6Vr3RSj_LJ9j-GGE3zdF3fLLrL7xxmc,13694
 langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_google_genai-1.0.
-langchain_google_genai-1.0.
-langchain_google_genai-1.0.
-langchain_google_genai-1.0.
+langchain_google_genai-1.0.8.dist-info/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
+langchain_google_genai-1.0.8.dist-info/METADATA,sha256=Xlests--lzB6RZeKEx94sGpQ9UXJZRnHA2asaNcXQmA,3818
+langchain_google_genai-1.0.8.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain_google_genai-1.0.8.dist-info/RECORD,,

{langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/LICENSE
File without changes

{langchain_google_genai-1.0.7.dist-info → langchain_google_genai-1.0.8.dist-info}/WHEEL
File without changes