langchain-google-genai 1.0.1__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-google-genai has been flagged as possibly problematic by the registry.
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/PKG-INFO +3 -3
- langchain_google_genai-1.0.3/langchain_google_genai/_function_utils.py +241 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/chat_models.py +204 -53
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/embeddings.py +6 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/llms.py +14 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/pyproject.toml +7 -4
- langchain_google_genai-1.0.1/langchain_google_genai/_function_utils.py +0 -116
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/LICENSE +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/README.md +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/__init__.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_common.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_enums.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_genai_extension.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/genai_aqa.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/google_vector_store.py +0 -0
- {langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/PKG-INFO RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 1.0.1
+Version: 1.0.3
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -12,8 +12,8 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: images
-Requires-Dist: google-generativeai (>=0.
-Requires-Dist: langchain-core (>=0.1,<0.2)
+Requires-Dist: google-generativeai (>=0.5.2,<0.6.0)
+Requires-Dist: langchain-core (>=0.1.45,<0.2)
 Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
langchain_google_genai-1.0.3/langchain_google_genai/_function_utils.py

@@ -0,0 +1,241 @@
+from __future__ import annotations
+
+from typing import (
+    Any,
+    Dict,
+    List,
+    Literal,
+    Optional,
+    Sequence,
+    Type,
+    TypedDict,
+    Union,
+)
+
+import google.ai.generativelanguage as glm
+from google.generativeai.types import Tool as GoogleTool  # type: ignore[import]
+from google.generativeai.types.content_types import (  # type: ignore[import]
+    FunctionCallingConfigType,
+    FunctionDeclarationType,
+    ToolDict,
+    ToolType,
+)
+from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.tools import BaseTool
+from langchain_core.tools import tool as callable_as_lc_tool
+from langchain_core.utils.json_schema import dereference_refs
+
+TYPE_ENUM = {
+    "string": glm.Type.STRING,
+    "number": glm.Type.NUMBER,
+    "integer": glm.Type.INTEGER,
+    "boolean": glm.Type.BOOLEAN,
+    "array": glm.Type.ARRAY,
+    "object": glm.Type.OBJECT,
+}
+
+TYPE_ENUM_REVERSE = {v: k for k, v in TYPE_ENUM.items()}
+
+
+def convert_to_genai_function_declarations(
+    tool: Union[
+        GoogleTool, ToolDict, FunctionDeclarationType, Sequence[FunctionDeclarationType]
+    ],
+) -> ToolType:
+    """Convert any tool-like object to a ToolType.
+
+    https://github.com/google-gemini/generative-ai-python/blob/668695ebe3e9de496a36eeb95cb2ed2faba9b939/google/generativeai/types/content_types.py#L574
+    """
+    if isinstance(tool, GoogleTool):
+        return tool
+    # check whether a dict is supported by glm, otherwise we parse it explicitly
+    if isinstance(tool, dict):
+        first_function_declaration = tool.get("function_declarations", [None])[0]
+        if isinstance(first_function_declaration, glm.FunctionDeclaration):
+            return tool
+        schema = None
+        try:
+            schema = first_function_declaration.parameters
+        except AttributeError:
+            pass
+        if schema is None:
+            schema = first_function_declaration.get("parameters")
+        if schema is None or isinstance(schema, glm.Schema):
+            return tool
+        return glm.Tool(
+            function_declarations=[
+                _convert_to_genai_function(fc) for fc in tool["function_declarations"]
+            ],
+        )
+    elif isinstance(tool, type) and issubclass(tool, BaseModel):
+        return glm.Tool(function_declarations=[_convert_to_genai_function(tool)])
+    elif callable(tool):
+        return _convert_tool_to_genai_function(callable_as_lc_tool()(tool))
+    elif isinstance(tool, list):
+        return glm.Tool(
+            function_declarations=[_convert_to_genai_function(fc) for fc in tool]
+        )
+    return glm.Tool(function_declarations=[_convert_to_genai_function(tool)])
+
+
+def tool_to_dict(tool: Union[glm.Tool, GoogleTool]) -> ToolDict:
+    if isinstance(tool, GoogleTool):
+        tool = tool._proto
+    function_declarations = []
+    for function_declaration_proto in tool.function_declarations:
+        properties: Dict[str, Any] = {}
+        for property in function_declaration_proto.parameters.properties:
+            property_type = function_declaration_proto.parameters.properties[
+                property
+            ].type
+            property_dict = {"type": TYPE_ENUM_REVERSE[property_type]}
+            property_description = function_declaration_proto.parameters.properties[
+                property
+            ].description
+            if property_description:
+                property_dict["description"] = property_description
+            properties[property] = property_dict
+        function_declaration = {
+            "name": function_declaration_proto.name,
+            "description": function_declaration_proto.description,
+            "parameters": {"type": "object", "properties": properties},
+        }
+        if function_declaration_proto.parameters.required:
+            function_declaration["parameters"][  # type: ignore[index]
+                "required"
+            ] = function_declaration_proto.parameters.required
+        function_declarations.append(function_declaration)
+    return {"function_declarations": function_declarations}
+
+
+def _convert_to_genai_function(fc: FunctionDeclarationType) -> glm.FunctionDeclaration:
+    if isinstance(fc, BaseTool):
+        return _convert_tool_to_genai_function(fc)
+    elif isinstance(fc, type) and issubclass(fc, BaseModel):
+        return _convert_pydantic_to_genai_function(fc)
+    elif callable(fc):
+        return _convert_tool_to_genai_function(callable_as_lc_tool()(fc))
+    elif isinstance(fc, dict):
+        return glm.FunctionDeclaration(
+            name=fc["name"],
+            description=fc.get("description"),
+            parameters={
+                "properties": {
+                    k: {
+                        "type_": TYPE_ENUM[v["type"]],
+                        "description": v.get("description"),
+                    }
+                    for k, v in fc["parameters"]["properties"].items()
+                },
+                "required": fc["parameters"].get("required", []),
+                "type_": TYPE_ENUM[fc["parameters"]["type"]],
+            },
+        )
+    else:
+        raise ValueError(f"Unsupported function call type {fc}")
+
+
+def _convert_tool_to_genai_function(tool: BaseTool) -> glm.FunctionDeclaration:
+    if tool.args_schema:
+        schema = dereference_refs(tool.args_schema.schema())
+        schema.pop("definitions", None)
+        return glm.FunctionDeclaration(
+            name=tool.name or schema["title"],
+            description=tool.description or schema["description"],
+            parameters={
+                "properties": {
+                    k: {
+                        "type_": TYPE_ENUM[v["type"]],
+                        "description": v.get("description"),
+                    }
+                    for k, v in schema["properties"].items()
+                },
+                "required": schema.get("required", []),
+                "type_": TYPE_ENUM[schema["type"]],
+            },
+        )
+    else:
+        return glm.FunctionDeclaration(
+            name=tool.name,
+            description=tool.description,
+            parameters={
+                "properties": {
+                    "__arg1": {"type_": TYPE_ENUM["string"]},
+                },
+                "required": ["__arg1"],
+                "type_": TYPE_ENUM["object"],
+            },
+        )
+
+
+def _convert_pydantic_to_genai_function(
+    pydantic_model: Type[BaseModel],
+) -> glm.FunctionDeclaration:
+    schema = dereference_refs(pydantic_model.schema())
+    schema.pop("definitions", None)
+    return glm.FunctionDeclaration(
+        name=schema["title"],
+        description=schema.get("description", ""),
+        parameters={
+            "properties": {
+                k: {
+                    "type_": TYPE_ENUM[v["type"]],
+                    "description": v.get("description"),
+                }
+                for k, v in schema["properties"].items()
+            },
+            "required": schema["required"],
+            "type_": TYPE_ENUM[schema["type"]],
+        },
+    )
+
+
+_ToolChoiceType = Union[
+    dict, List[str], str, Literal["auto", "none", "any"], Literal[True]
+]
+
+
+class _ToolConfigDict(TypedDict):
+    function_calling_config: FunctionCallingConfigType
+
+
+def _tool_choice_to_tool_config(
+    tool_choice: _ToolChoiceType,
+    all_names: List[str],
+) -> _ToolConfigDict:
+    allowed_function_names: Optional[List[str]] = None
+    if tool_choice is True or tool_choice == "any":
+        mode = "any"
+        allowed_function_names = all_names
+    elif tool_choice == "auto":
+        mode = "auto"
+    elif tool_choice == "none":
+        mode = "none"
+    elif isinstance(tool_choice, str):
+        mode = "any"
+        allowed_function_names = [tool_choice]
+    elif isinstance(tool_choice, list):
+        mode = "any"
+        allowed_function_names = tool_choice
+    elif isinstance(tool_choice, dict):
+        if "mode" in tool_choice:
+            mode = tool_choice["mode"]
+            allowed_function_names = tool_choice.get("allowed_function_names")
+        elif "function_calling_config" in tool_choice:
+            mode = tool_choice["function_calling_config"]["mode"]
+            allowed_function_names = tool_choice["function_calling_config"].get(
+                "allowed_function_names"
+            )
+        else:
+            raise ValueError(
+                f"Unrecognized tool choice format:\n\n{tool_choice=}\n\nShould match "
+                f"Google GenerativeAI ToolConfig or FunctionCallingConfig format."
+            )
+    else:
+        raise ValueError(f"Unrecognized tool choice format:\n\n{tool_choice=}")
+    return _ToolConfigDict(
+        function_calling_config={
+            "mode": mode,
+            "allowed_function_names": allowed_function_names,
+        }
+    )
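Taken together, these helpers accept a much wider range of tool shapes than the single `List[BaseTool | Type[BaseModel] | Dict]` signature they replace (see the deleted 1.0.1 module at the bottom of this diff). A minimal sketch of how they compose; the `Multiply` schema is made up for illustration, and the underscore-prefixed name is a private helper, so treat this as a sketch rather than a supported API:

import google.ai.generativelanguage as glm
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain_google_genai._function_utils import (
    _tool_choice_to_tool_config,
    convert_to_genai_function_declarations,
    tool_to_dict,
)


class Multiply(BaseModel):
    """Multiply two integers."""

    a: int = Field(description="First factor")
    b: int = Field(description="Second factor")


# A pydantic class is converted straight to a glm.Tool proto.
genai_tool = convert_to_genai_function_declarations(Multiply)
assert isinstance(genai_tool, glm.Tool)

# Round-trip back to a plain dict (this is what bind_tools serializes).
tool_dict = tool_to_dict(genai_tool)
print(tool_dict["function_declarations"][0]["name"])  # "Multiply"

# "any" forces the model to call one of the allowed functions.
config = _tool_choice_to_tool_config("any", ["Multiply"])
print(config)
# {'function_calling_config': {'mode': 'any', 'allowed_function_names': ['Multiply']}}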
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/chat_models.py RENAMED

@@ -4,6 +4,8 @@ import base64
 import json
 import logging
 import os
+import uuid
+import warnings
 from io import BytesIO
 from typing import (
     Any,
@@ -28,10 +30,17 @@ import google.api_core
 import google.generativeai as genai  # type: ignore[import]
 import proto  # type: ignore[import]
 import requests
+from google.generativeai.types import SafetySettingDict  # type: ignore[import]
+from google.generativeai.types import Tool as GoogleTool  # type: ignore[import]
+from google.generativeai.types.content_types import (  # type: ignore[import]
+    FunctionDeclarationType,
+    ToolDict,
+)
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
+from langchain_core.language_models import LanguageModelInput
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.messages import (
     AIMessage,
@@ -39,10 +48,16 @@ from langchain_core.messages import (
     BaseMessage,
     FunctionMessage,
     HumanMessage,
+    InvalidToolCall,
     SystemMessage,
+    ToolCall,
+    ToolCallChunk,
+    ToolMessage,
 )
+from langchain_core.output_parsers.openai_tools import parse_tool_calls
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.pydantic_v1 import SecretStr, root_validator
+from langchain_core.runnables import Runnable
 from langchain_core.utils import get_from_dict_or_env
 from tenacity import (
     before_sleep_log,
@@ -54,7 +69,11 @@ from tenacity import (
 
 from langchain_google_genai._common import GoogleGenerativeAIError
 from langchain_google_genai._function_utils import (
+    _tool_choice_to_tool_config,
+    _ToolChoiceType,
+    _ToolConfigDict,
     convert_to_genai_function_declarations,
+    tool_to_dict,
 )
 from langchain_google_genai.llms import GoogleModelFamily, _BaseGoogleGenerativeAI
 
@@ -300,27 +319,16 @@ def _convert_to_parts(
 
 def _parse_chat_history(
     input_messages: Sequence[BaseMessage], convert_system_message_to_human: bool = False
-) -> List[genai.types.ContentDict]:
+) -> Tuple[Optional[genai.types.ContentDict], List[genai.types.ContentDict]]:
     messages: List[genai.types.MessageDict] = []
 
-    raw_system_message: Optional[SystemMessage] = None
-    for i, message in enumerate(input_messages):
-        if (
-            i == 0
-            and isinstance(message, SystemMessage)
-            and not convert_system_message_to_human
-        ):
-            raise ValueError(
-                """SystemMessages are not yet supported!
-
-To automatically convert the leading SystemMessage to a HumanMessage,
-set `convert_system_message_to_human` to True. Example:
+    if convert_system_message_to_human:
+        warnings.warn("Convert_system_message_to_human will be deprecated!")
 
-llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
-"""
-            )
-        elif isinstance(message, SystemMessage):
-            raw_system_message = message
+    system_instruction: Optional[genai.types.ContentDict] = None
+    for i, message in enumerate(input_messages):
+        if i == 0 and isinstance(message, SystemMessage):
+            system_instruction = _convert_to_parts(message.content)
             continue
         elif isinstance(message, AIMessage):
             role = "model"
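The practical effect: a leading SystemMessage no longer raises, it is split off and returned as a separate `system_instruction`, and `convert_system_message_to_human` now only emits a deprecation warning. A sketch of usage that 1.0.1 would have rejected; the model name and prompts are illustrative:

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-pro")
response = llm.invoke(
    [
        SystemMessage(content="Answer in exactly one sentence."),
        HumanMessage(content="What is LangChain?"),
    ]
)  # no convert_system_message_to_human=True needed anymore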
@@ -360,42 +368,133 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human
                     )
                 )
             ]
+        elif isinstance(message, ToolMessage):
+            role = "user"
+            prev_message: Optional[BaseMessage] = (
+                input_messages[i - 1] if i > 0 else None
+            )
+            if (
+                prev_message
+                and isinstance(prev_message, AIMessage)
+                and prev_message.tool_calls
+            ):
+                # message.name can be null for ToolMessage
+                name: str = prev_message.tool_calls[0]["name"]
+            else:
+                name = message.name  # type: ignore
+            tool_response: Any
+            if not isinstance(message.content, str):
+                tool_response = message.content
+            else:
+                try:
+                    tool_response = json.loads(message.content)
+                except json.JSONDecodeError:
+                    tool_response = message.content  # leave as str representation
+            parts = [
+                glm.Part(
+                    function_response=glm.FunctionResponse(
+                        name=name,
+                        response=(
+                            {"output": tool_response}
+                            if not isinstance(tool_response, dict)
+                            else tool_response
+                        ),
+                    )
+                )
+            ]
         else:
             raise ValueError(
                 f"Unexpected message with type {type(message)} at the position {i}."
             )
 
-        if raw_system_message:
-            if role == "model":
-                raise ValueError(
-                    "SystemMessage should be followed by a HumanMessage and "
-                    "not by AIMessage."
-                )
-            parts = _convert_to_parts(raw_system_message.content) + parts
-            raw_system_message = None
         messages.append({"role": role, "parts": parts})
-    return messages
+    return system_instruction, messages
 
 
 def _parse_response_candidate(
-    response_candidate: glm.Candidate,
+    response_candidate: glm.Candidate, streaming: bool = False
 ) -> AIMessage:
-
-
-
-
-
-
+    content: Union[None, str, List[str]] = None
+    additional_kwargs = {}
+    tool_calls = []
+    invalid_tool_calls = []
+    tool_call_chunks = []
+
+    for part in response_candidate.content.parts:
+        try:
+            text: Optional[str] = part.text
+        except AttributeError:
+            text = None
+
+        if text is not None:
+            if not content:
+                content = text
+            elif isinstance(content, str) and text:
+                content = [content, text]
+            elif isinstance(content, list) and text:
+                content.append(text)
+            elif text:
+                raise Exception("Unexpected content type")
+
+        if part.function_call:
+            # TODO: support multiple function calls
+            if "function_call" in additional_kwargs:
+                raise Exception("Multiple function calls are not currently supported")
+            function_call = {"name": part.function_call.name}
+            # dump to match other function calling llm for now
+            function_call_args_dict = proto.Message.to_dict(part.function_call)["args"]
+            function_call["arguments"] = json.dumps(
+                {k: function_call_args_dict[k] for k in function_call_args_dict}
+            )
+            additional_kwargs["function_call"] = function_call
+
+            if streaming:
+                tool_call_chunks.append(
+                    ToolCallChunk(
+                        name=function_call.get("name"),
+                        args=function_call.get("arguments"),
+                        id=function_call.get("id", str(uuid.uuid4())),
+                        index=function_call.get("index"),  # type: ignore
+                    )
+                )
+            else:
+                try:
+                    tool_calls_dicts = parse_tool_calls(
+                        [{"function": function_call}],
+                        return_id=False,
+                    )
+                    tool_calls = [
+                        ToolCall(
+                            name=tool_call["name"],
+                            args=tool_call["args"],
+                            id=tool_call.get("id", str(uuid.uuid4())),
+                        )
+                        for tool_call in tool_calls_dicts
+                    ]
+                except Exception as e:
+                    invalid_tool_calls = [
+                        InvalidToolCall(
+                            name=function_call.get("name"),
+                            args=function_call.get("arguments"),
+                            id=function_call.get("id", str(uuid.uuid4())),
+                            error=str(e),
+                        )
+                    ]
+    if content is None:
+        content = ""
+
+    if streaming:
+        return AIMessageChunk(
+            content=cast(Union[str, List[Union[str, Dict[Any, Any]]]], content),
+            additional_kwargs=additional_kwargs,
+            tool_call_chunks=tool_call_chunks,
         )
-
-
-
-
-
-
-        content = [proto.Message.to_dict(part) for part in parts]
-    return (AIMessageChunk if stream else AIMessage)(
-        content=content, additional_kwargs={}
+
+    return AIMessage(
+        content=cast(Union[str, List[Union[str, Dict[Any, Any]]]], content),
+        additional_kwargs=additional_kwargs,
+        tool_calls=tool_calls,
+        invalid_tool_calls=invalid_tool_calls,
     )
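With `streaming=True` the parser returns an AIMessageChunk whose partial function calls land in `tool_call_chunks`; in the non-streaming path, fully parsed `tool_calls` are attached to the AIMessage, and a parse failure is preserved as an `invalid_tool_calls` entry carrying the error string. A hedged sketch of consuming the streaming side, assuming `llm_with_tools` is a tool-bound model as in the `bind_tools` example further down:

# Sketch only: llm_with_tools = ChatGoogleGenerativeAI(...).bind_tools([...])
for chunk in llm_with_tools.stream("What's the weather in Berlin?"):
    if chunk.tool_call_chunks:
        print(chunk.tool_call_chunks)  # partial name/args fragments
    elif chunk.content:
        print(chunk.content, end="")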
@@ -418,7 +517,7 @@ def _response_to_result(
         ]
         generations.append(
             (ChatGenerationChunk if stream else ChatGeneration)(
-                message=_parse_response_candidate(candidate,
+                message=_parse_response_candidate(candidate, streaming=stream),
                 generation_info=generation_info,
             )
         )
@@ -483,11 +582,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validates params and passes them to google-generativeai package."""
+        additional_headers = values.get("additional_headers") or {}
+        default_metadata = tuple(additional_headers.items())
+
         if values.get("credentials"):
             genai.configure(
                 credentials=values.get("credentials"),
                 transport=values.get("transport"),
                 client_options=values.get("client_options"),
+                default_metadata=default_metadata,
             )
         else:
             google_api_key = get_from_dict_or_env(
@@ -500,6 +603,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 api_key=google_api_key,
                 transport=values.get("transport"),
                 client_options=values.get("client_options"),
+                default_metadata=default_metadata,
             )
         if (
             values.get("temperature") is not None
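`validate_environment` now turns the new `additional_headers` field (declared on `_BaseGoogleGenerativeAI` in llms.py below) into `default_metadata` key-value pairs for `genai.configure`, so custom headers accompany every request. A minimal sketch; the header name is made up:

from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="gemini-pro",
    additional_headers={"X-My-Team": "search"},  # hypothetical header
)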
@@ -640,25 +744,38 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         self,
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
+        tools: Optional[Sequence[Union[ToolDict, GoogleTool]]] = None,
+        functions: Optional[Sequence[FunctionDeclarationType]] = None,
+        safety_settings: Optional[SafetySettingDict] = None,
+        tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
         **kwargs: Any,
     ) -> Tuple[Dict[str, Any], genai.ChatSession, genai.types.ContentDict]:
         client = self.client
-
-
-
-
-
-
+        formatted_tools = None
+        if tools:
+            formatted_tools = [
+                convert_to_genai_function_declarations(tool) for tool in tools
+            ]
+        elif functions:
+            formatted_tools = [convert_to_genai_function_declarations(functions)]
+
+        if formatted_tools or safety_settings:
             client = genai.GenerativeModel(
-                model_name=self.model,
+                model_name=self.model,
+                tools=formatted_tools,
+                safety_settings=safety_settings,
             )
 
-        params = self._prepare_params(stop, **kwargs)
-        history = _parse_chat_history(
+        params = self._prepare_params(stop, tool_config=tool_config, **kwargs)
+        system_instruction, history = _parse_chat_history(
             messages,
             convert_system_message_to_human=self.convert_system_message_to_human,
         )
         message = history.pop()
+        if self.client._system_instruction != system_instruction:
+            self.client = genai.GenerativeModel(
+                model_name=self.model, system_instruction=system_instruction
+            )
         chat = client.start_chat(history=history)
         return params, chat, message
 
@@ -681,3 +798,37 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         token_count = result["token_count"]
 
         return token_count
+
+    def bind_tools(
+        self,
+        tools: Sequence[Union[ToolDict, GoogleTool]],
+        tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
+        *,
+        tool_choice: Optional[Union[_ToolChoiceType, bool]] = None,
+        **kwargs: Any,
+    ) -> Runnable[LanguageModelInput, BaseMessage]:
+        """Bind tool-like objects to this chat model.
+
+        Assumes model is compatible with google-generativeAI tool-calling API.
+
+        Args:
+            tools: A list of tool definitions to bind to this chat model.
+                Can be a pydantic model, callable, or BaseTool. Pydantic
+                models, callables, and BaseTools will be automatically converted to
+                their schema dictionary representation.
+            **kwargs: Any additional parameters to pass to the
+                :class:`~langchain.runnable.Runnable` constructor.
+        """
+        if tool_choice and tool_config:
+            raise ValueError(
+                "Must specify at most one of tool_choice and tool_config, received "
+                f"both:\n\n{tool_choice=}\n\n{tool_config=}"
+            )
+        # Bind dicts for easier serialization/deserialization.
+        genai_tools = [tool_to_dict(convert_to_genai_function_declarations(tools))]
+        if tool_choice:
+            all_names = [
+                f["name"] for t in genai_tools for f in t["function_declarations"]
+            ]
+            tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
+        return self.bind(tools=genai_tools, tool_config=tool_config, **kwargs)
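`bind_tools` is the headline addition: tools are converted to Gemini function declarations, serialized via `tool_to_dict`, and an optional `tool_choice` is translated into a `tool_config`. A usage sketch with a hypothetical pydantic tool:

from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_google_genai import ChatGoogleGenerativeAI


class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(description="City name, e.g. Berlin")


llm = ChatGoogleGenerativeAI(model="gemini-pro")
# tool_choice="GetWeather" maps to mode="any" with only this function allowed.
llm_with_tools = llm.bind_tools([GetWeather], tool_choice="GetWeather")
ai_msg = llm_with_tools.invoke("What's the weather in Berlin?")
print(ai_msg.tool_calls)
# e.g. [{'name': 'GetWeather', 'args': {'city': 'Berlin'}, 'id': '...'}]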
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/embeddings.py RENAMED

@@ -61,6 +61,11 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
+    request_options: Optional[Dict] = Field(
+        None,
+        description="A dictionary of request options to pass to the Google API client."
+        "Example: `{'timeout': 10}`",
+    )
 
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
@@ -95,6 +100,7 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
                 content=texts,
                 task_type=task_type,
                 title=title,
+                request_options=self.request_options,
             )
         except Exception as e:
             raise GoogleGenerativeAIError(f"Error embedding content: {e}") from e
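Embedding calls can now carry per-request client options; `{'timeout': 10}` is the example the field's own description suggests. A minimal sketch:

from langchain_google_genai import GoogleGenerativeAIEmbeddings

embeddings = GoogleGenerativeAIEmbeddings(
    model="models/embedding-001",
    request_options={"timeout": 10},  # seconds, forwarded to the genai client
)
vectors = embeddings.embed_documents(["hello", "world"])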
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/llms.py RENAMED

@@ -86,6 +86,7 @@ def _completion_with_retry(
             stream=stream,
             generation_config=generation_config,
             safety_settings=kwargs.pop("safety_settings", None),
+            request_options={"timeout": llm.timeout} if llm.timeout else None,
         )
         return llm.client.generate_text(prompt=prompt, **kwargs)
     except google.api_core.exceptions.FailedPrecondition as exc:
@@ -143,6 +144,10 @@ Supported examples:
     not return the full n completions if duplicates are generated."""
     max_retries: int = 6
     """The maximum number of retries to make when generating."""
+
+    timeout: Optional[float] = None
+    """The maximum number of seconds to wait for a response."""
+
     client_options: Optional[Dict] = Field(
         None,
         description=(
@@ -154,6 +159,12 @@ Supported examples:
         None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
+    additional_headers: Optional[Dict[str, str]] = Field(
+        None,
+        description=(
+            "A key-value dictionary representing additional headers for the model call"
+        ),
+    )
 
     safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
     """The default safety settings to use for all generations.
@@ -253,6 +264,9 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0:
             raise ValueError("max_output_tokens must be greater than zero")
 
+        if values["timeout"] is not None and values["timeout"] <= 0:
+            raise ValueError("timeout must be greater than zero")
+
         return values
 
     def _generate(
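The new `timeout` field is validated to be positive and forwarded as `request_options={'timeout': ...}` on each completion call. A minimal sketch:

from langchain_google_genai import GoogleGenerativeAI

# timeout is in seconds; a value <= 0 now fails validation at construction time.
llm = GoogleGenerativeAI(model="gemini-pro", timeout=30.0)
print(llm.invoke("Tell me a short fact about bison."))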
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/pyproject.toml RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "1.0.1"
+version = "1.0.3"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,8 +12,8 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "
-google-generativeai = "^0.
+langchain-core = ">=0.1.45,<0.2"
+google-generativeai = "^0.5.2"
 pillow = { version = "^10.1.0", optional = true }
 
 [tool.poetry.extras]
@@ -30,6 +30,7 @@ syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
 numpy = "^1.26.2"
+langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"}
 
 [tool.poetry.group.codespell]
 optional = true
@@ -56,6 +57,7 @@ types-requests = "^2.28.11.5"
 types-google-cloud-ndb = "^2.2.0.1"
 types-pillow = "^10.1.0.2"
 types-protobuf = "^4.24.0.20240302"
+langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"}
 
 [tool.poetry.group.dev]
 optional = true
@@ -65,6 +67,7 @@ pillow = "^10.1.0"
 types-requests = "^2.31.0.10"
 types-pillow = "^10.1.0.2"
 types-google-cloud-ndb = "^2.2.0.1"
+langchain-core = {git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core"}
 
 [tool.ruff]
 select = [
@@ -93,7 +96,7 @@ build-backend = "poetry.core.masonry.api"
 #
 # https://github.com/tophat/syrupy
 # --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
-addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
+#addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
 # Registering custom markers.
 # https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
 markers = [
langchain_google_genai-1.0.1/langchain_google_genai/_function_utils.py

@@ -1,116 +0,0 @@
-from __future__ import annotations
-
-from typing import (
-    Dict,
-    List,
-    Type,
-    Union,
-)
-
-import google.ai.generativelanguage as glm
-from langchain_core.pydantic_v1 import BaseModel
-from langchain_core.tools import BaseTool
-from langchain_core.utils.json_schema import dereference_refs
-
-FunctionCallType = Union[BaseTool, Type[BaseModel], Dict]
-
-TYPE_ENUM = {
-    "string": glm.Type.STRING,
-    "number": glm.Type.NUMBER,
-    "integer": glm.Type.INTEGER,
-    "boolean": glm.Type.BOOLEAN,
-    "array": glm.Type.ARRAY,
-    "object": glm.Type.OBJECT,
-}
-
-
-def convert_to_genai_function_declarations(
-    function_calls: List[FunctionCallType],
-) -> List[glm.Tool]:
-    return [
-        glm.Tool(
-            function_declarations=[_convert_to_genai_function(fc)],
-        )
-        for fc in function_calls
-    ]
-
-
-def _convert_to_genai_function(fc: FunctionCallType) -> glm.FunctionDeclaration:
-    if isinstance(fc, BaseTool):
-        return _convert_tool_to_genai_function(fc)
-    elif isinstance(fc, type) and issubclass(fc, BaseModel):
-        return _convert_pydantic_to_genai_function(fc)
-    elif isinstance(fc, dict):
-        return glm.FunctionDeclaration(
-            name=fc["name"],
-            description=fc.get("description"),
-            parameters={
-                "properties": {
-                    k: {
-                        "type_": TYPE_ENUM[v["type"]],
-                        "description": v.get("description"),
-                    }
-                    for k, v in fc["parameters"]["properties"].items()
-                },
-                "required": fc["parameters"].get("required", []),
-                "type_": TYPE_ENUM[fc["parameters"]["type"]],
-            },
-        )
-    else:
-        raise ValueError(f"Unsupported function call type {fc}")
-
-
-def _convert_tool_to_genai_function(tool: BaseTool) -> glm.FunctionDeclaration:
-    if tool.args_schema:
-        schema = dereference_refs(tool.args_schema.schema())
-        schema.pop("definitions", None)
-
-        return glm.FunctionDeclaration(
-            name=tool.name or schema["title"],
-            description=tool.description or schema["description"],
-            parameters={
-                "properties": {
-                    k: {
-                        "type_": TYPE_ENUM[v["type"]],
-                        "description": v.get("description"),
-                    }
-                    for k, v in schema["properties"].items()
-                },
-                "required": schema["required"],
-                "type_": TYPE_ENUM[schema["type"]],
-            },
-        )
-    else:
-        return glm.FunctionDeclaration(
-            name=tool.name,
-            description=tool.description,
-            parameters={
-                "properties": {
-                    "__arg1": {"type_": TYPE_ENUM["string"]},
-                },
-                "required": ["__arg1"],
-                "type_": TYPE_ENUM["object"],
-            },
-        )
-
-
-def _convert_pydantic_to_genai_function(
-    pydantic_model: Type[BaseModel],
-) -> glm.FunctionDeclaration:
-    schema = dereference_refs(pydantic_model.schema())
-    schema.pop("definitions", None)
-    return glm.FunctionDeclaration(
-        name=schema["title"],
-        description=schema.get("description", ""),
-        parameters={
-            "properties": {
-                k: {
-                    "type_": TYPE_ENUM[v["type"]],
-                    "description": v.get("description"),
-                }
-                for k, v in schema["properties"].items()
-            },
-            "required": schema["required"],
-            "type_": TYPE_ENUM[schema["type"]],
-        },
-    )
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/LICENSE RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/README.md RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/__init__.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_common.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_enums.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/_genai_extension.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/genai_aqa.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/google_vector_store.py RENAMED
File without changes
{langchain_google_genai-1.0.1 → langchain_google_genai-1.0.3}/langchain_google_genai/py.typed RENAMED
File without changes