langchain-google-genai 2.0.1__py3-none-any.whl → 2.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-google-genai might be problematic.
- langchain_google_genai/_function_utils.py +66 -45
- langchain_google_genai/_genai_extension.py +1 -1
- langchain_google_genai/chat_models.py +220 -27
- langchain_google_genai/genai_aqa.py +2 -2
- {langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/METADATA +2 -2
- {langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/RECORD +8 -8
- {langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/LICENSE +0 -0
- {langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/WHEEL +0 -0
langchain_google_genai/_function_utils.py

@@ -43,9 +43,9 @@ TYPE_ENUM = {
     "boolean": glm.Type.BOOLEAN,
     "array": glm.Type.ARRAY,
     "object": glm.Type.OBJECT,
+    "null": None,
 }
 
-TYPE_ENUM_REVERSE = {v: k for k, v in TYPE_ENUM.items()}
 _ALLOWED_SCHEMA_FIELDS = []
 _ALLOWED_SCHEMA_FIELDS.extend([f.name for f in gapic.Schema()._pb.DESCRIPTOR.fields])
 _ALLOWED_SCHEMA_FIELDS.extend(
@@ -102,12 +102,7 @@ def _format_json_schema_to_gapic(schema: Dict[str, Any]) -> Dict[str, Any]:
         elif key == "items":
             converted_schema["items"] = _format_json_schema_to_gapic(value)
         elif key == "properties":
-
-            converted_schema["properties"] = {}
-            for pkey, pvalue in value.items():
-                converted_schema["properties"][pkey] = _format_json_schema_to_gapic(
-                    pvalue
-                )
+            converted_schema["properties"] = _get_properties_from_schema(value)
             continue
         elif key == "allOf":
             if len(value) > 1:
@@ -137,7 +132,6 @@ def _dict_to_gapic_schema(schema: Dict[str, Any]) -> Optional[gapic.Schema]:
 def _format_dict_to_function_declaration(
     tool: Union[FunctionDescription, Dict[str, Any]],
 ) -> gapic.FunctionDeclaration:
-    print(tool)
     return gapic.FunctionDeclaration(
         name=tool.get("name") or tool.get("title"),
         description=tool.get("description"),
@@ -159,13 +153,11 @@ def convert_to_genai_function_declarations(
     for tool in tools:
         if isinstance(tool, gapic.Tool):
             gapic_tool.function_declarations.extend(tool.function_declarations)
+        elif isinstance(tool, dict) and "function_declarations" not in tool:
+            fd = _format_to_gapic_function_declaration(tool)
+            gapic_tool.function_declarations.append(fd)
         elif isinstance(tool, dict):
-
-            fd = _format_to_gapic_function_declaration(tool)
-            gapic_tool.function_declarations.append(fd)
-            continue
-            tool = cast(_ToolDictLike, tool)
-            function_declarations = tool["function_declarations"]
+            function_declarations = cast(_ToolDictLike, tool)["function_declarations"]
             if not isinstance(function_declarations, collections.abc.Sequence):
                 raise ValueError(
                     "function_declarations should be a list"
@@ -204,9 +196,12 @@ def _format_to_gapic_function_declaration(
     elif isinstance(tool, type) and is_basemodel_subclass_safe(tool):
         return _convert_pydantic_to_genai_function(tool)
     elif isinstance(tool, dict):
-        if all(k in tool for k in ("
+        if all(k in tool for k in ("type", "function")) and tool["type"] == "function":
+            function = tool["function"]
+        elif (
+            all(k in tool for k in ("name", "description")) and "parameters" not in tool
+        ):
             function = cast(dict, tool)
-            function["parameters"] = {}
         else:
             if (
                 "parameters" in tool and tool["parameters"].get("properties")  # type: ignore[index]
@@ -214,7 +209,10 @@ def _format_to_gapic_function_declaration(
                 function = convert_to_openai_tool(cast(dict, tool))["function"]
             else:
                 function = cast(dict, tool)
-
+        function["parameters"] = function.get("parameters") or {}
+        # Empty 'properties' field not supported.
+        if not function["parameters"].get("properties"):
+            function["parameters"] = {}
         return _format_dict_to_function_declaration(cast(FunctionDescription, function))
     elif callable(tool):
         return _format_base_tool_to_function_declaration(callable_as_lc_tool()(tool))
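Net effect of the two hunks above: _format_to_gapic_function_declaration now recognizes OpenAI-style tool dicts ({"type": "function", "function": {...}}) and only drops the parameters block when it has no properties. A minimal usage sketch, assuming langchain-google-genai 2.0.3 is installed; note that _function_utils is a private module, so the import is for illustration only:

    from langchain_google_genai._function_utils import (
        convert_to_genai_function_declarations,
    )

    # An OpenAI-style tool definition (hypothetical example schema).
    openai_style_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Return the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    gapic_tool = convert_to_genai_function_declarations([openai_style_tool])
    print(gapic_tool.function_declarations[0].name)  # expected: get_weather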
@@ -302,8 +300,10 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
             logger.warning(f"Value '{v}' is not supported in schema, ignoring v={v}")
             continue
         properties_item: Dict[str, Union[str, int, Dict, List]] = {}
-        if v.get("type") or v.get("anyOf"):
+        if v.get("type") or v.get("anyOf") or v.get("type_"):
             properties_item["type_"] = _get_type_from_schema(v)
+            if _is_nullable_schema(v):
+                properties_item["nullable"] = True
 
         if v.get("enum"):
             properties_item["enum"] = v["enum"]
@@ -312,10 +312,10 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
         if description and isinstance(description, str):
             properties_item["description"] = description
 
-        if
+        if properties_item.get("type_") == glm.Type.ARRAY and v.get("items"):
             properties_item["items"] = _get_items_from_schema_any(v.get("items"))
 
-        if
+        if properties_item.get("type_") == glm.Type.OBJECT and v.get("properties"):
             properties_item["properties"] = _get_properties_from_schema_any(
                 v.get("properties")
             )
@@ -328,11 +328,7 @@ def _get_properties_from_schema(schema: Dict) -> Dict[str, Any]:
 
 
 def _get_items_from_schema_any(schema: Any) -> Dict[str, Any]:
-    if isinstance(schema,
-        return _get_items_from_schema(schema)
-    if isinstance(schema, List):
-        return _get_items_from_schema(schema)
-    if isinstance(schema, str):
+    if isinstance(schema, (dict, list, str)):
         return _get_items_from_schema(schema)
     return {}
 
@@ -343,41 +339,66 @@ def _get_items_from_schema(schema: Union[Dict, List, str]) -> Dict[str, Any]:
         for i, v in enumerate(schema):
             items[f"item{i}"] = _get_properties_from_schema_any(v)
     elif isinstance(schema, Dict):
-
-
-
-
-
-
-
-
-
-
-
-            )
-            if k == "title" and "description" not in item:
-                item["description"] = v
-        items = item
+        items["type_"] = _get_type_from_schema(schema)
+        if items["type_"] == glm.Type.OBJECT and "properties" in schema:
+            items["properties"] = _get_properties_from_schema_any(schema["properties"])
+        if "title" in schema:
+            items["title"] = schema
+        if "title" in schema or "description" in schema:
+            items["description"] = (
+                schema.get("description") or schema.get("title") or ""
+            )
+        if _is_nullable_schema(schema):
+            items["nullable"] = True
     else:
         # str
-        items["type_"] =
+        items["type_"] = _get_type_from_schema({"type": schema})
+        if _is_nullable_schema({"type": schema}):
+            items["nullable"] = True
+
     return items
 
 
 def _get_type_from_schema(schema: Dict[str, Any]) -> int:
+    return _get_nullable_type_from_schema(schema) or glm.Type.STRING
+
+
+def _get_nullable_type_from_schema(schema: Dict[str, Any]) -> Optional[int]:
     if "anyOf" in schema:
-        types = [
+        types = [
+            _get_nullable_type_from_schema(sub_schema) for sub_schema in schema["anyOf"]
+        ]
         types = [t for t in types if t is not None]  # Remove None values
         if types:
             return types[-1]  # TODO: update FunctionDeclaration and pass all types?
         else:
             pass
-    elif "type" in schema:
-
+    elif "type" in schema or "type_" in schema:
+        type_ = schema["type"] if "type" in schema else schema["type_"]
+        if isinstance(type_, int):
+            return type_
+        stype = str(schema["type"]) if "type" in schema else str(schema["type_"])
         return TYPE_ENUM.get(stype, glm.Type.STRING)
     else:
         pass
-    return
+    return glm.Type.STRING  # Default to string if no valid types found
+
+
+def _is_nullable_schema(schema: Dict[str, Any]) -> bool:
+    if "anyOf" in schema:
+        types = [
+            _get_nullable_type_from_schema(sub_schema) for sub_schema in schema["anyOf"]
+        ]
+        return any(t is None for t in types)
+    elif "type" in schema or "type_" in schema:
+        type_ = schema["type"] if "type" in schema else schema["type_"]
+        if isinstance(type_, int):
+            return False
+        stype = str(schema["type"]) if "type" in schema else str(schema["type_"])
+        return TYPE_ENUM.get(stype, glm.Type.STRING) is None
+    else:
+        pass
+    return False
 
 
 _ToolChoiceType = Union[
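Taken together, the "null" entry in TYPE_ENUM and the new _get_nullable_type_from_schema / _is_nullable_schema helpers let a schema produced for an optional field (an anyOf containing a "null" branch, as Pydantic emits for Optional[...]) keep its concrete Gemini type while being flagged nullable. A standalone, simplified sketch of that idea (not the library code itself):

    from typing import Any, Dict, Optional, Tuple

    # Simplified stand-in for the library's TYPE_ENUM; "null" maps to None on purpose.
    TYPE_ENUM: Dict[str, Optional[str]] = {
        "string": "STRING",
        "integer": "INTEGER",
        "null": None,
    }


    def nullable_type(schema: Dict[str, Any]) -> Tuple[Optional[str], bool]:
        """Return (concrete type, is_nullable) for a JSON-schema fragment."""
        if "anyOf" in schema:
            subtypes = [nullable_type(sub)[0] for sub in schema["anyOf"]]
            concrete = [t for t in subtypes if t is not None]
            return (concrete[-1] if concrete else None), any(t is None for t in subtypes)
        t = TYPE_ENUM.get(str(schema.get("type")), "STRING")
        return t, t is None


    # The schema Pydantic emits for Optional[int] (Union[int, None]):
    print(nullable_type({"anyOf": [{"type": "integer"}, {"type": "null"}]}))
    # -> ('INTEGER', True)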
langchain_google_genai/_genai_extension.py

@@ -238,7 +238,7 @@ def _prepare_config(
     client_info: Optional[gapic_v1.client_info.ClientInfo] = None,
     transport: Optional[str] = None,
 ) -> Dict[str, Any]:
-    formatted_client_options = {"api_endpoint": _config.api_endpoint}
+    formatted_client_options: dict = {"api_endpoint": _config.api_endpoint}
     if client_options:
         formatted_client_options.update(**client_options)
     if not credentials and api_key:
langchain_google_genai/chat_models.py

@@ -49,7 +49,9 @@ from google.ai.generativelanguage_v1beta.types import (
     ToolConfig,
     VideoMetadata,
 )
+from google.generativeai.caching import CachedContent  # type: ignore[import]
 from google.generativeai.types import Tool as GoogleTool  # type: ignore[import]
+from google.generativeai.types import caching_types, content_types
 from google.generativeai.types.content_types import (  # type: ignore[import]
     FunctionDeclarationType,
     ToolDict,
@@ -80,6 +82,7 @@ from langchain_core.output_parsers.openai_tools import (
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable, RunnablePassthrough
 from langchain_core.utils import secret_from_env
+from langchain_core.utils.function_calling import convert_to_openai_tool
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -566,20 +569,29 @@ def _parse_response_candidate(
 def _response_to_result(
     response: GenerateContentResponse,
     stream: bool = False,
+    prev_usage: Optional[UsageMetadata] = None,
 ) -> ChatResult:
     """Converts a PaLM API response into a LangChain ChatResult."""
     llm_output = {"prompt_feedback": proto.Message.to_dict(response.prompt_feedback)}
 
+    # previous usage metadata needs to be subtracted because gemini api returns
+    # already-accumulated token counts with each chunk
+    prev_input_tokens = prev_usage["input_tokens"] if prev_usage else 0
+    prev_output_tokens = prev_usage["output_tokens"] if prev_usage else 0
+    prev_total_tokens = prev_usage["total_tokens"] if prev_usage else 0
+
     # Get usage metadata
     try:
         input_tokens = response.usage_metadata.prompt_token_count
         output_tokens = response.usage_metadata.candidates_token_count
         total_tokens = response.usage_metadata.total_token_count
-
+        cache_read_tokens = response.usage_metadata.cached_content_token_count
+        if input_tokens + output_tokens + cache_read_tokens + total_tokens > 0:
             lc_usage = UsageMetadata(
-                input_tokens=input_tokens,
-                output_tokens=output_tokens,
-                total_tokens=total_tokens,
+                input_tokens=input_tokens - prev_input_tokens,
+                output_tokens=output_tokens - prev_output_tokens,
+                total_tokens=total_tokens - prev_total_tokens,
+                input_token_details={"cache_read": cache_read_tokens},
             )
         else:
             lc_usage = None
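The subtraction above exists because the Gemini streaming API reports running totals rather than per-chunk counts, so each chunk's usage_metadata has the previous totals removed before it is attached to the message. A toy illustration with made-up numbers:

    # Cumulative counts as Gemini might report them across three streamed chunks
    # (illustrative values only).
    cumulative = [
        {"input_tokens": 18, "output_tokens": 2, "total_tokens": 20},
        {"input_tokens": 18, "output_tokens": 7, "total_tokens": 25},
        {"input_tokens": 18, "output_tokens": 12, "total_tokens": 30},
    ]

    prev = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    for usage in cumulative:
        delta = {k: usage[k] - prev[k] for k in usage}  # what each chunk now carries
        print(delta)
        prev = usage
    # {'input_tokens': 18, 'output_tokens': 2, 'total_tokens': 20}
    # {'input_tokens': 0, 'output_tokens': 5, 'total_tokens': 5}
    # {'input_tokens': 0, 'output_tokens': 5, 'total_tokens': 5}

Summing the deltas reproduces the final totals, which is what downstream aggregation of chunk usage expects.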
@@ -598,12 +610,17 @@ def _response_to_result(
         ]
         message = _parse_response_candidate(candidate, streaming=stream)
         message.usage_metadata = lc_usage
-
-        (
-
-
+        if stream:
+            generations.append(
+                ChatGenerationChunk(
+                    message=cast(AIMessageChunk, message),
+                    generation_info=generation_info,
+                )
+            )
+        else:
+            generations.append(
+                ChatGeneration(message=message, generation_info=generation_info)
             )
-        )
     if not response.candidates:
         # Likely a "prompt feedback" violation (e.g., toxic input)
         # Raising an error would be different than how OpenAI handles it,
@@ -612,12 +629,14 @@ def _response_to_result(
             "Gemini produced an empty response. Continuing with empty message\n"
             f"Feedback: {response.prompt_feedback}"
         )
-
-
-
-
-
-
+        if stream:
+            generations = [
+                ChatGenerationChunk(
+                    message=AIMessageChunk(content=""), generation_info={}
+                )
+            ]
+        else:
+            generations = [ChatGeneration(message=AIMessage(""), generation_info={})]
     return ChatResult(generations=generations, llm_output=llm_output)
 
 
@@ -809,7 +828,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             {'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23}
 
 
-
+    Response metadata
         .. code-block:: python
 
             ai_msg = llm.invoke(messages)
@@ -842,6 +861,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     Gemini does not support system messages; any unsupported messages will
     raise an error."""
 
+    cached_content: Optional[str] = None
+    """The name of the cached content used as context to serve the prediction.
+
+    Note: only used in explicit caching, where users can have control over caching
+    (e.g. what content to cache) and enjoy guaranteed cost savings. Format:
+    ``cachedContents/{cachedContent}``.
+    """
+
     model_config = ConfigDict(
         populate_by_name=True,
     )
@@ -976,6 +1003,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         safety_settings: Optional[SafetySettingDict] = None,
         tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
         generation_config: Optional[Dict[str, Any]] = None,
+        cached_content: Optional[str] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
         **kwargs: Any,
     ) -> ChatResult:
         request = self._prepare_request(
@@ -986,6 +1015,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             safety_settings=safety_settings,
             tool_config=tool_config,
             generation_config=generation_config,
+            cached_content=cached_content or self.cached_content,
+            tool_choice=tool_choice,
         )
         response: GenerateContentResponse = _chat_with_retry(
             request=request,
@@ -1006,6 +1037,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         safety_settings: Optional[SafetySettingDict] = None,
         tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
         generation_config: Optional[Dict[str, Any]] = None,
+        cached_content: Optional[str] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
         **kwargs: Any,
     ) -> ChatResult:
         if not self.async_client:
@@ -1031,6 +1064,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             safety_settings=safety_settings,
             tool_config=tool_config,
             generation_config=generation_config,
+            cached_content=cached_content or self.cached_content,
+            tool_choice=tool_choice,
         )
         response: GenerateContentResponse = await _achat_with_retry(
             request=request,
@@ -1051,6 +1086,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         safety_settings: Optional[SafetySettingDict] = None,
         tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
         generation_config: Optional[Dict[str, Any]] = None,
+        cached_content: Optional[str] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         request = self._prepare_request(
@@ -1061,6 +1098,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             safety_settings=safety_settings,
             tool_config=tool_config,
             generation_config=generation_config,
+            cached_content=cached_content or self.cached_content,
+            tool_choice=tool_choice,
         )
         response: GenerateContentResponse = _chat_with_retry(
             request=request,
@@ -1068,9 +1107,31 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             **kwargs,
             metadata=self.default_metadata,
         )
+
+        prev_usage_metadata: UsageMetadata | None = None
         for chunk in response:
-            _chat_result = _response_to_result(
+            _chat_result = _response_to_result(
+                chunk, stream=True, prev_usage=prev_usage_metadata
+            )
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+            message = cast(AIMessageChunk, gen.message)
+
+            curr_usage_metadata: UsageMetadata | dict[str, int] = (
+                message.usage_metadata or {}
+            )
+
+            prev_usage_metadata = (
+                message.usage_metadata
+                if prev_usage_metadata is None
+                else UsageMetadata(
+                    input_tokens=prev_usage_metadata.get("input_tokens", 0)
+                    + curr_usage_metadata.get("input_tokens", 0),
+                    output_tokens=prev_usage_metadata.get("output_tokens", 0)
+                    + curr_usage_metadata.get("output_tokens", 0),
+                    total_tokens=prev_usage_metadata.get("total_tokens", 0)
+                    + curr_usage_metadata.get("total_tokens", 0),
+                )
+            )
 
             if run_manager:
                 run_manager.on_llm_new_token(gen.text)
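Because each streamed chunk now carries only its own usage delta (the running total is tracked internally in prev_usage_metadata), a caller can add chunks together to get per-run totals. A hedged usage sketch; the model name is an example and Google credentials are assumed to be configured:

    from langchain_google_genai import ChatGoogleGenerativeAI

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")  # example model name

    final = None
    for chunk in llm.stream("Write a haiku about version bumps."):
        # AIMessageChunk addition merges usage_metadata, so the deltas sum up cleanly.
        final = chunk if final is None else final + chunk

    print(final.usage_metadata)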
@@ -1087,6 +1148,8 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         safety_settings: Optional[SafetySettingDict] = None,
         tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
         generation_config: Optional[Dict[str, Any]] = None,
+        cached_content: Optional[str] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         if not self.async_client:
@@ -1113,15 +1176,38 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             safety_settings=safety_settings,
             tool_config=tool_config,
             generation_config=generation_config,
+            cached_content=cached_content or self.cached_content,
+            tool_choice=tool_choice,
         )
+        prev_usage_metadata: UsageMetadata | None = None
         async for chunk in await _achat_with_retry(
             request=request,
             generation_method=self.async_client.stream_generate_content,
             **kwargs,
             metadata=self.default_metadata,
         ):
-            _chat_result = _response_to_result(
+            _chat_result = _response_to_result(
+                chunk, stream=True, prev_usage=prev_usage_metadata
+            )
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+            message = cast(AIMessageChunk, gen.message)
+
+            curr_usage_metadata: UsageMetadata | dict[str, int] = (
+                message.usage_metadata or {}
+            )
+
+            prev_usage_metadata = (
+                message.usage_metadata
+                if prev_usage_metadata is None
+                else UsageMetadata(
+                    input_tokens=prev_usage_metadata.get("input_tokens", 0)
+                    + curr_usage_metadata.get("input_tokens", 0),
+                    output_tokens=prev_usage_metadata.get("output_tokens", 0)
+                    + curr_usage_metadata.get("output_tokens", 0),
+                    total_tokens=prev_usage_metadata.get("total_tokens", 0)
+                    + curr_usage_metadata.get("total_tokens", 0),
+                )
+            )
 
             if run_manager:
                 await run_manager.on_llm_new_token(gen.text)
@@ -1136,8 +1222,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         functions: Optional[Sequence[FunctionDeclarationType]] = None,
         safety_settings: Optional[SafetySettingDict] = None,
         tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
         generation_config: Optional[Dict[str, Any]] = None,
+        cached_content: Optional[str] = None,
     ) -> Tuple[GenerateContentRequest, Dict[str, Any]]:
+        if tool_choice and tool_config:
+            raise ValueError(
+                "Must specify at most one of tool_choice and tool_config, received "
+                f"both:\n\n{tool_choice=}\n\n{tool_config=}"
+            )
         formatted_tools = None
         if tools:
             formatted_tools = [convert_to_genai_function_declarations(tools)]
@@ -1148,6 +1241,18 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             messages,
             convert_system_message_to_human=self.convert_system_message_to_human,
         )
+        if tool_choice:
+            if not formatted_tools:
+                msg = (
+                    f"Received {tool_choice=} but no {tools=}. 'tool_choice' can only "
+                    f"be specified if 'tools' is specified."
+                )
+                raise ValueError(msg)
+            all_names = [
+                f.name for t in formatted_tools for f in t.function_declarations
+            ]
+            tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
+
         formatted_tool_config = None
         if tool_config:
             formatted_tool_config = ToolConfig(
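With tool_choice threaded through _prepare_request (and accepted by the generate/stream methods above), a specific function call can be required per invocation. A hedged sketch; the model name and tool are examples, and it assumes bind_tools forwards tool_choice as shown in the bind_tools hunk later in this diff:

    from pydantic import BaseModel, Field

    from langchain_google_genai import ChatGoogleGenerativeAI


    class GetWeather(BaseModel):
        """Get the current weather for a city."""

        city: str = Field(description="City name")


    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")  # example model name
    llm_forced = llm.bind_tools([GetWeather], tool_choice="GetWeather")
    ai_msg = llm_forced.invoke("What's the weather in Paris?")
    print(ai_msg.tool_calls)  # expected to contain a GetWeather call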
@@ -1168,6 +1273,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             generation_config=self._prepare_params(
                 stop, generation_config=generation_config
             ),
+            cached_content=cached_content,
         )
         if system_instruction:
             request.system_instruction = system_instruction
@@ -1243,20 +1349,107 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 "Must specify at most one of tool_choice and tool_config, received "
                 f"both:\n\n{tool_choice=}\n\n{tool_config=}"
             )
-
-
-
-
-
-                for t in genai_tools
-                for f in t["function_declarations"]
+        try:
+            formatted_tools: list = [convert_to_openai_tool(tool) for tool in tools]  # type: ignore[arg-type]
+        except Exception:
+            formatted_tools = [
+                tool_to_dict(convert_to_genai_function_declarations(tools))
             ]
+        if tool_choice:
+            kwargs["tool_choice"] = tool_choice
+        elif tool_config:
+            kwargs["tool_config"] = tool_config
+        else:
+            pass
+        return self.bind(tools=formatted_tools, **kwargs)
+
+    def create_cached_content(
+        self,
+        contents: Union[List[BaseMessage], content_types.ContentsType],
+        *,
+        display_name: str | None = None,
+        tools: Union[ToolDict, GoogleTool, None] = None,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
+        ttl: Optional[caching_types.TTLTypes] = None,
+        expire_time: Optional[caching_types.ExpireTimeTypes] = None,
+    ) -> str:
+        """
+
+        Args:
+            display_name: The user-generated meaningful display name
+                of the cached content. `display_name` must be no
+                more than 128 unicode characters.
+            contents: Contents to cache.
+            tools: A list of `Tools` the model may use to generate response.
+            tool_choice: Which tool to require the model to call.
+            ttl: TTL for cached resource (in seconds). Defaults to 1 hour.
+                `ttl` and `expire_time` are exclusive arguments.
+            expire_time: Expiration time for cached resource.
+                `ttl` and `expire_time` are exclusive arguments.
+        """
+        system: Optional[content_types.ContentType] = None
+        genai_contents: list = []
+        if all(isinstance(c, BaseMessage) for c in contents):
+            system, genai_contents = _parse_chat_history(
+                contents,
+                convert_system_message_to_human=self.convert_system_message_to_human,
+            )
+        elif any(isinstance(c, BaseMessage) for c in contents):
+            raise ValueError(
+                f"'contents' must either be a list of "
+                f"langchain_core.messages.BaseMessage or a list "
+                f"google.generativeai.types.content_types.ContentType, but not a mix "
+                f"of the two. Received {contents}"
+            )
+        else:
+            for content in contents:
+                if hasattr(content, "role") and content.role == "system":
+                    if system is not None:
+                        warnings.warn(
+                            "Received multiple pieces of content with role 'system'. "
+                            "Should only be one set of system instructions. Ignoring "
+                            "all but the first 'system' content."
+                        )
+                    else:
+                        system = content
+                elif isinstance(content, dict) and content.get("role") == "system":
+                    if system is not None:
+                        warnings.warn(
+                            "Received multiple pieces of content with role 'system'. "
+                            "Should only be one set of system instructions. Ignoring "
+                            "all but the first 'system' content."
+                        )
+                    else:
+                        system = content
+                else:
+                    genai_contents.append(content)
+        if tools:
+            genai_tools = [convert_to_genai_function_declarations(tools)]
+        else:
+            genai_tools = None
+        if tool_choice and genai_tools:
+            all_names = [f.name for t in genai_tools for f in t.function_declarations]
             tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
-
+            genai_tool_config = ToolConfig(
+                function_calling_config=tool_config["function_calling_config"]
+            )
+        else:
+            genai_tool_config = None
+        cached_content = CachedContent.create(
+            model=self.model,
+            system_instruction=system,
+            contents=genai_contents,
+            display_name=display_name,
+            tools=genai_tools,
+            tool_config=genai_tool_config,
+            ttl=ttl,
+            expire_time=expire_time,
+        )
+        return cached_content.name
 
     @property
     def _supports_tool_choice(self) -> bool:
-        return "gemini-1.5-pro" in self.model
+        return "gemini-1.5-pro" in self.model or "gemini-1.5-flash" in self.model
 
 
     def _get_tool_name(
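The new explicit-caching surface: create_cached_content uploads contents (LangChain messages or raw google-generativeai content) once and returns a name of the form cachedContents/{...}, which can then be set as cached_content on the model or passed per call. A hedged end-to-end sketch; model name, TTL, and document text are placeholders:

    from langchain_core.messages import HumanMessage, SystemMessage

    from langchain_google_genai import ChatGoogleGenerativeAI

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-001")  # example model name

    long_document = "..."  # large context worth caching (placeholder)
    cache_name = llm.create_cached_content(
        [
            SystemMessage(content="Answer questions about the attached report."),
            HumanMessage(content=long_document),
        ],
        ttl=3600,  # seconds; mutually exclusive with expire_time
    )

    cached_llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-flash-001", cached_content=cache_name
    )
    ai_msg = cached_llm.invoke("Summarize the report.")
    print(ai_msg.usage_metadata)  # expected to include a "cache_read" token detail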
langchain_google_genai/genai_aqa.py

@@ -116,8 +116,8 @@ class GenAIAqa(RunnableSerializable[AqaInput, AqaOutput]):
         super().__init__(**kwargs)
         self._client = _AqaModel(**kwargs)
 
-    def invoke(
-        self, input: AqaInput, config: Optional[RunnableConfig] = None
+    def invoke(
+        self, input: AqaInput, config: Optional[RunnableConfig] = None, **kwargs: Any
     ) -> AqaOutput:
         """Generates a grounded response using the provided passages."""
 
{langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 2.0.1
+Version: 2.0.3
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: images
 Requires-Dist: google-generativeai (>=0.8.0,<0.9.0)
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: langchain-core (>=0.3.13,<0.4)
 Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
 Requires-Dist: pydantic (>=2,<3)
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
{langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/RECORD

@@ -1,16 +1,16 @@
 langchain_google_genai/__init__.py,sha256=Oji-S2KYWrku1wyQEskY84IOfY8MfRhujjJ4d7hbsk4,2758
 langchain_google_genai/_common.py,sha256=ASlwE8hEbvOm55BVF_D4rf2nl7RYsnpsi5xbM6DW3Cc,1576
 langchain_google_genai/_enums.py,sha256=KLPmxS1K83K4HjBIXFaXoL_sFEOv8Hq-2B2PDMKyDgo,197
-langchain_google_genai/_function_utils.py,sha256=
-langchain_google_genai/_genai_extension.py,sha256=
+langchain_google_genai/_function_utils.py,sha256=ksKgLoKPhKzd3QLLgYojuq4tOegoEL-oHPncARPds3w,16891
+langchain_google_genai/_genai_extension.py,sha256=81a4ly5ZHlqMf37uJfdB8K41qE6J5ujLnbUypIfFf2o,20775
 langchain_google_genai/_image_utils.py,sha256=-0XgCMdYkvrIktFvUpy-2GPbFgfSVKZICawB2hiJzus,4999
-langchain_google_genai/chat_models.py,sha256=
+langchain_google_genai/chat_models.py,sha256=pObFpTYKh-of52BrAbpVv1UIDp_qZ8XRTDRhPYgNZ2M,56506
 langchain_google_genai/embeddings.py,sha256=PBJlcRExfegD2V9hnKQHW99fJJGsbPbmMW1TfQtPmqU,10132
-langchain_google_genai/genai_aqa.py,sha256=
+langchain_google_genai/genai_aqa.py,sha256=qB6h3-BSXqe0YLR3eeVllYzmNKK6ofI6xJLdBahUVZo,4300
 langchain_google_genai/google_vector_store.py,sha256=4wvhIiOmc3Fo046FyafPmT9NBCLek-9bgluvuTfrbpQ,16148
 langchain_google_genai/llms.py,sha256=lXh0Se-kbBJlHrw0kR1DRsZw_bet1thKZ2HskvT8itU,14221
 langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_google_genai-2.0.
-langchain_google_genai-2.0.
-langchain_google_genai-2.0.
-langchain_google_genai-2.0.
+langchain_google_genai-2.0.3.dist-info/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
+langchain_google_genai-2.0.3.dist-info/METADATA,sha256=OvxxVXvYGo3Qa7L8y1o06VX8JCR31WQSjOmpclTEhKA,3851
+langchain_google_genai-2.0.3.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain_google_genai-2.0.3.dist-info/RECORD,,
{langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/LICENSE: file without changes
{langchain_google_genai-2.0.1.dist-info → langchain_google_genai-2.0.3.dist-info}/WHEEL: file without changes