langchain-google-genai 1.0.7__tar.gz → 1.0.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: the registry has flagged this version of langchain-google-genai as possibly problematic; see the registry listing for details.

Files changed (17)
  1. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/PKG-INFO +2 -2
  2. langchain_google_genai-1.0.9/langchain_google_genai/_function_utils.py +335 -0
  3. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/chat_models.py +307 -67
  4. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/embeddings.py +6 -6
  5. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/llms.py +4 -4
  6. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/pyproject.toml +3 -2
  7. langchain_google_genai-1.0.7/langchain_google_genai/_function_utils.py +0 -340
  8. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/LICENSE +0 -0
  9. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/README.md +0 -0
  10. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/__init__.py +0 -0
  11. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/_common.py +0 -0
  12. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/_enums.py +0 -0
  13. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/_genai_extension.py +0 -0
  14. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/_image_utils.py +0 -0
  15. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/genai_aqa.py +0 -0
  16. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/google_vector_store.py +0 -0
  17. {langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 1.0.7
+Version: 1.0.9
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: images
 Requires-Dist: google-generativeai (>=0.7.0,<0.8.0)
-Requires-Dist: langchain-core (>=0.2.9,<0.3)
+Requires-Dist: langchain-core (>=0.2.32,<0.3)
 Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
 Project-URL: Repository, https://github.com/langchain-ai/langchain-google
 Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
langchain_google_genai-1.0.9/langchain_google_genai/_function_utils.py (new file)
@@ -0,0 +1,335 @@
+from __future__ import annotations
+
+import collections
+import json
+import logging
+from typing import (
+    Any,
+    Callable,
+    Collection,
+    Dict,
+    List,
+    Literal,
+    Optional,
+    Sequence,
+    Type,
+    TypedDict,
+    Union,
+    cast,
+)
+
+import google.ai.generativelanguage as glm
+import google.ai.generativelanguage_v1beta.types as gapic
+import proto  # type: ignore[import]
+from google.generativeai.types.content_types import ToolDict  # type: ignore[import]
+from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.tools import BaseTool
+from langchain_core.tools import tool as callable_as_lc_tool
+from langchain_core.utils.function_calling import (
+    FunctionDescription,
+    convert_to_openai_tool,
+)
+from langchain_core.utils.json_schema import dereference_refs
+
+logger = logging.getLogger(__name__)
+
+
+TYPE_ENUM = {
+    "string": glm.Type.STRING,
+    "number": glm.Type.NUMBER,
+    "integer": glm.Type.INTEGER,
+    "boolean": glm.Type.BOOLEAN,
+    "array": glm.Type.ARRAY,
+    "object": glm.Type.OBJECT,
+}
+
+TYPE_ENUM_REVERSE = {v: k for k, v in TYPE_ENUM.items()}
+_ALLOWED_SCHEMA_FIELDS = []
+_ALLOWED_SCHEMA_FIELDS.extend([f.name for f in gapic.Schema()._pb.DESCRIPTOR.fields])
+_ALLOWED_SCHEMA_FIELDS.extend(
+    [
+        f
+        for f in gapic.Schema.to_dict(
+            gapic.Schema(), preserving_proto_field_name=False
+        ).keys()
+    ]
+)
+_ALLOWED_SCHEMA_FIELDS_SET = set(_ALLOWED_SCHEMA_FIELDS)
+
+
+class _ToolDictLike(TypedDict):
+    function_declarations: _FunctionDeclarationLikeList
+
+
+class _FunctionDeclarationDict(TypedDict):
+    name: str
+    description: str
+    parameters: Dict[str, Collection[str]]
+
+
+class _ToolDict(TypedDict):
+    function_declarations: Sequence[_FunctionDeclarationDict]
+
+
+# Info: This is a FunctionDeclaration(=fc).
+_FunctionDeclarationLike = Union[
+    BaseTool, Type[BaseModel], gapic.FunctionDeclaration, Callable, Dict[str, Any]
+]
+
+# Info: This means one tool.
+_FunctionDeclarationLikeList = Sequence[_FunctionDeclarationLike]
+
+
+# Info: This means one tool = a Sequence of FunctionDeclaration.
+# The dict should be gapic.Tool-like: {"function_declarations": [{"name": ...}]}.
+# An OpenAI-like dict is not accepted: {"type": "function", "function": {"name": ...}}
+_ToolsType = Union[
+    gapic.Tool,
+    ToolDict,
+    _ToolDictLike,
+    _FunctionDeclarationLikeList,
+    _FunctionDeclarationLike,
+]
+
+
+def _format_json_schema_to_gapic(schema: Dict[str, Any]) -> Dict[str, Any]:
+    converted_schema: Dict[str, Any] = {}
+    for key, value in schema.items():
+        if key == "definitions":
+            continue
+        elif key == "items":
+            converted_schema["items"] = _format_json_schema_to_gapic(value)
+        elif key == "properties":
+            if "properties" not in converted_schema:
+                converted_schema["properties"] = {}
+            for pkey, pvalue in value.items():
+                converted_schema["properties"][pkey] = _format_json_schema_to_gapic(
+                    pvalue
+                )
+            continue
+        elif key in ["type", "_type"]:
+            converted_schema["type"] = str(value).upper()
+        elif key not in _ALLOWED_SCHEMA_FIELDS_SET:
+            logger.warning(f"Key '{key}' is not supported in schema, ignoring")
+        else:
+            converted_schema[key] = value
+    return converted_schema
+
+
+def _dict_to_gapic_schema(schema: Dict[str, Any]) -> Optional[gapic.Schema]:
+    if schema:
+        dereferenced_schema = dereference_refs(schema)
+        formatted_schema = _format_json_schema_to_gapic(dereferenced_schema)
+        json_schema = json.dumps(formatted_schema)
+        return gapic.Schema.from_json(json_schema)
+    return None
+
+
+def _format_dict_to_function_declaration(
+    tool: Union[FunctionDescription, Dict[str, Any]],
+) -> gapic.FunctionDeclaration:
+    return gapic.FunctionDeclaration(
+        name=tool.get("name"),
+        description=tool.get("description"),
+        parameters=_dict_to_gapic_schema(tool.get("parameters", {})),
+    )
+
+
+# Info: gapic.Tool means function_declarations and proto.Message.
+def convert_to_genai_function_declarations(
+    tools: Sequence[_ToolsType],
+) -> gapic.Tool:
+    if not isinstance(tools, collections.abc.Sequence):
+        logger.warning(
+            "convert_to_genai_function_declarations expects a Sequence "
+            "and not a single tool."
+        )
+        tools = [tools]
+    gapic_tool = gapic.Tool()
+    for tool in tools:
+        if isinstance(tool, gapic.Tool):
+            gapic_tool.function_declarations.extend(tool.function_declarations)
+        elif isinstance(tool, dict):
+            if "function_declarations" not in tool:
+                fd = _format_to_gapic_function_declaration(tool)
+                gapic_tool.function_declarations.append(fd)
+                continue
+            tool = cast(_ToolDictLike, tool)
+            function_declarations = tool["function_declarations"]
+            if not isinstance(function_declarations, collections.abc.Sequence):
+                raise ValueError(
+                    "function_declarations should be a list, "
+                    f"got '{type(function_declarations)}'"
+                )
+            if function_declarations:
+                fds = [
+                    _format_to_gapic_function_declaration(fd)
+                    for fd in function_declarations
+                ]
+                gapic_tool.function_declarations.extend(fds)
+        else:
+            fd = _format_to_gapic_function_declaration(tool)
+            gapic_tool.function_declarations.append(fd)
+    return gapic_tool
+
+
+def tool_to_dict(tool: gapic.Tool) -> _ToolDict:
+    def _traverse_values(raw: Any) -> Any:
+        if isinstance(raw, list):
+            return [_traverse_values(v) for v in raw]
+        if isinstance(raw, dict):
+            return {k: _traverse_values(v) for k, v in raw.items()}
+        if isinstance(raw, proto.Message):
+            return _traverse_values(type(raw).to_dict(raw))
+        return raw
+
+    return _traverse_values(type(tool).to_dict(tool))
+
+
+def _format_to_gapic_function_declaration(
+    tool: _FunctionDeclarationLike,
+) -> gapic.FunctionDeclaration:
+    if isinstance(tool, BaseTool):
+        return _format_base_tool_to_function_declaration(tool)
+    elif isinstance(tool, type) and issubclass(tool, BaseModel):
+        return _convert_pydantic_to_genai_function(tool)
+    elif isinstance(tool, dict):
+        if all(k in tool for k in ("name", "description")) and "parameters" not in tool:
+            function = cast(dict, tool)
+            function["parameters"] = {}
+        else:
+            if "parameters" in tool and tool["parameters"].get("properties"):
+                function = convert_to_openai_tool(cast(dict, tool))["function"]
+            else:
+                function = cast(dict, tool)
+                function["parameters"] = {}
+        return _format_dict_to_function_declaration(cast(FunctionDescription, function))
+    elif callable(tool):
+        return _format_base_tool_to_function_declaration(callable_as_lc_tool()(tool))
+    raise ValueError(f"Unsupported tool type {tool}")
+
+
+def _format_base_tool_to_function_declaration(
+    tool: BaseTool,
+) -> gapic.FunctionDeclaration:
+    if not tool.args_schema:
+        return gapic.FunctionDeclaration(
+            name=tool.name,
+            description=tool.description,
+            parameters=gapic.Schema(
+                type=gapic.Type.OBJECT,
+                properties={
+                    "__arg1": gapic.Schema(type=gapic.Type.STRING),
+                },
+                required=["__arg1"],
+            ),
+        )
+
+    schema = tool.args_schema.schema()
+    parameters = _dict_to_gapic_schema(schema)
+
+    return gapic.FunctionDeclaration(
+        name=tool.name or schema.get("title"),
+        description=tool.description or schema.get("description"),
+        parameters=parameters,
+    )
+
+
+def _convert_pydantic_to_genai_function(
+    pydantic_model: Type[BaseModel],
+    tool_name: Optional[str] = None,
+    tool_description: Optional[str] = None,
+) -> gapic.FunctionDeclaration:
+    schema = dereference_refs(pydantic_model.schema())
+    schema.pop("definitions", None)
+    function_declaration = gapic.FunctionDeclaration(
+        name=tool_name if tool_name else schema.get("title"),
+        description=tool_description if tool_description else schema.get("description"),
+        parameters={
+            "properties": {
+                k: {
+                    "type_": _get_type_from_schema(v),
+                    "description": v.get("description"),
+                }
+                for k, v in schema["properties"].items()
+            },
+            "required": schema.get("required", []),
+            "type_": TYPE_ENUM[schema["type"]],
+        },
+    )
+    return function_declaration
+
+
+def _get_type_from_schema(schema: Dict[str, Any]) -> int:
+    if "anyOf" in schema:
+        types = [_get_type_from_schema(sub_schema) for sub_schema in schema["anyOf"]]
+        types = [t for t in types if t is not None]  # Remove None values
+        if types:
+            return types[-1]  # TODO: update FunctionDeclaration and pass all types?
+        else:
+            pass
+    elif "type" in schema:
+        stype = str(schema["type"])
+        if stype in TYPE_ENUM:
+            return TYPE_ENUM[stype]
+        else:
+            pass
+    else:
+        pass
+    return TYPE_ENUM["string"]  # Default to string if no valid types found
+
+
+_ToolChoiceType = Union[
+    dict, List[str], str, Literal["auto", "none", "any"], Literal[True]
+]
+
+
+class _FunctionCallingConfigDict(TypedDict):
+    mode: Union[gapic.FunctionCallingConfig.Mode, str]
+    allowed_function_names: Optional[List[str]]
+
+
+class _ToolConfigDict(TypedDict):
+    function_calling_config: _FunctionCallingConfigDict
+
+
+def _tool_choice_to_tool_config(
+    tool_choice: _ToolChoiceType,
+    all_names: List[str],
+) -> _ToolConfigDict:
+    allowed_function_names: Optional[List[str]] = None
+    if tool_choice is True or tool_choice == "any":
+        mode = "ANY"
+        allowed_function_names = all_names
+    elif tool_choice == "auto":
+        mode = "AUTO"
+    elif tool_choice == "none":
+        mode = "NONE"
+    elif isinstance(tool_choice, str):
+        mode = "ANY"
+        allowed_function_names = [tool_choice]
+    elif isinstance(tool_choice, list):
+        mode = "ANY"
+        allowed_function_names = tool_choice
+    elif isinstance(tool_choice, dict):
+        if "mode" in tool_choice:
+            mode = tool_choice["mode"]
+            allowed_function_names = tool_choice.get("allowed_function_names")
+        elif "function_calling_config" in tool_choice:
+            mode = tool_choice["function_calling_config"]["mode"]
+            allowed_function_names = tool_choice["function_calling_config"].get(
+                "allowed_function_names"
+            )
+        else:
+            raise ValueError(
+                f"Unrecognized tool choice format:\n\n{tool_choice=}\n\nShould match "
+                f"Google GenerativeAI ToolConfig or FunctionCallingConfig format."
+            )
+    else:
+        raise ValueError(f"Unrecognized tool choice format:\n\n{tool_choice=}")
+    return _ToolConfigDict(
+        function_calling_config={
+            "mode": mode.upper(),
+            "allowed_function_names": allowed_function_names,
+        }
+    )
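Reviewer note: the new module's entry points now take a *sequence* of tool-likes and merge them into a single gapic.Tool, with a warn-and-wrap fallback for a bare tool. A minimal sketch of the new behavior (these are private helpers, imported here purely for illustration; assumes google-generativeai 0.7.x is installed):

    from langchain_core.pydantic_v1 import BaseModel, Field

    from langchain_google_genai._function_utils import (
        convert_to_genai_function_declarations,
        tool_to_dict,
    )

    class GetWeather(BaseModel):
        """Get the current weather in a given location"""

        location: str = Field(..., description="The city and state, e.g. San Francisco, CA")

    # One call over the whole sequence, one merged gapic.Tool.
    gapic_tool = convert_to_genai_function_declarations([GetWeather])
    # tool_to_dict now round-trips via proto's to_dict instead of rebuilding dicts by hand.
    print(tool_to_dict(gapic_tool)["function_declarations"][0]["name"])  # "GetWeather"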
{langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/chat_models.py
@@ -32,8 +32,10 @@ import google.api_core
 import proto  # type: ignore[import]
 import requests
 from google.ai.generativelanguage_v1beta.types import (
+    Blob,
     Candidate,
     Content,
+    FileData,
     FunctionCall,
     FunctionResponse,
     GenerateContentRequest,
@@ -42,6 +44,7 @@ from google.ai.generativelanguage_v1beta.types import (
     Part,
     SafetySetting,
     ToolConfig,
+    VideoMetadata,
 )
 from google.generativeai.types import Tool as GoogleTool  # type: ignore[import]
 from google.generativeai.types.content_types import (  # type: ignore[import]
@@ -60,13 +63,11 @@ from langchain_core.messages import (
     BaseMessage,
     FunctionMessage,
     HumanMessage,
-    InvalidToolCall,
     SystemMessage,
-    ToolCall,
-    ToolCallChunk,
     ToolMessage,
 )
 from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.tool import invalid_tool_call, tool_call, tool_call_chunk
 from langchain_core.output_parsers.base import OutputParserLike
 from langchain_core.output_parsers.openai_tools import (
     JsonOutputToolsParser,
@@ -77,6 +78,7 @@ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResu
 from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
 from langchain_core.runnables import Runnable, RunnablePassthrough
 from langchain_core.utils import get_from_dict_or_env
+from langchain_core.utils.pydantic import is_basemodel_subclass
 from tenacity import (
     before_sleep_log,
     retry,
@@ -140,7 +142,7 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
     multiplier = 2
     min_seconds = 1
     max_seconds = 60
-    max_retries = 10
+    max_retries = 2
 
     return retry(
         reraise=True,
@@ -326,8 +328,35 @@ def _convert_to_parts(
                         )
                     img_url = img_url["url"]
                 parts.append(image_loader.load_part(img_url))
+            # Handle media type like LangChain.js
+            # https://github.com/langchain-ai/langchainjs/blob/e536593e2585f1dd7b0afc187de4d07cb40689ba/libs/langchain-google-common/src/utils/gemini.ts#L93-L106
+            elif part["type"] == "media":
+                if "mime_type" not in part:
+                    raise ValueError(f"Missing mime_type in media part: {part}")
+                mime_type = part["mime_type"]
+                media_part = Part()
+
+                if "data" in part:
+                    media_part.inline_data = Blob(
+                        data=part["data"], mime_type=mime_type
+                    )
+                elif "file_uri" in part:
+                    media_part.file_data = FileData(
+                        file_uri=part["file_uri"], mime_type=mime_type
+                    )
+                else:
+                    raise ValueError(
+                        f"Media part must have either data or file_uri: {part}"
+                    )
+                if "video_metadata" in part:
+                    metadata = VideoMetadata(part["video_metadata"])
+                    media_part.video_metadata = metadata
+                parts.append(media_part)
             else:
-                raise ValueError(f"Unrecognized message part type: {part['type']}")
+                raise ValueError(
+                    f"Unrecognized message part type: {part['type']}. Only text, "
+                    f"image_url, and media types are supported."
+                )
         else:
             # Yolo
             logger.warning(
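Reviewer note: the "media" branch added above mirrors LangChain.js and accepts either inline bytes ("data") or a file reference ("file_uri"), plus optional video metadata. A minimal sketch of a message that exercises it (the URI is a placeholder, and GOOGLE_API_KEY is assumed to be set):

    from langchain_core.messages import HumanMessage
    from langchain_google_genai import ChatGoogleGenerativeAI

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
    message = HumanMessage(
        content=[
            {"type": "text", "text": "Summarize this recording."},
            {
                "type": "media",
                "mime_type": "audio/mpeg",
                # Placeholder URI; in practice this comes from the Files API.
                "file_uri": "https://generativelanguage.googleapis.com/v1beta/files/example",
            },
        ]
    )
    # llm.invoke([message])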
@@ -466,9 +495,6 @@ def _parse_response_candidate(
             raise Exception("Unexpected content type")
 
         if part.function_call:
-            # TODO: support multiple function calls
-            if "function_call" in additional_kwargs:
-                raise Exception("Multiple function calls are not currently supported")
             function_call = {"name": part.function_call.name}
             # dump to match other function calling llm for now
             function_call_args_dict = proto.Message.to_dict(part.function_call)["args"]
@@ -479,7 +505,7 @@ def _parse_response_candidate(
 
             if streaming:
                 tool_call_chunks.append(
-                    ToolCallChunk(
+                    tool_call_chunk(
                         name=function_call.get("name"),
                         args=function_call.get("arguments"),
                         id=function_call.get("id", str(uuid.uuid4())),
@@ -488,27 +514,27 @@ def _parse_response_candidate(
                 )
             else:
                 try:
-                    tool_calls_dicts = parse_tool_calls(
+                    tool_call_dict = parse_tool_calls(
                         [{"function": function_call}],
                         return_id=False,
-                    )
-                    tool_calls = [
-                        ToolCall(
-                            name=tool_call["name"],
-                            args=tool_call["args"],
-                            id=tool_call.get("id", str(uuid.uuid4())),
-                        )
-                        for tool_call in tool_calls_dicts
-                    ]
+                    )[0]
                 except Exception as e:
-                    invalid_tool_calls = [
-                        InvalidToolCall(
+                    invalid_tool_calls.append(
+                        invalid_tool_call(
                             name=function_call.get("name"),
                             args=function_call.get("arguments"),
                             id=function_call.get("id", str(uuid.uuid4())),
                             error=str(e),
                         )
-                    ]
+                    )
+                else:
+                    tool_calls.append(
+                        tool_call(
+                            name=tool_call_dict["name"],
+                            args=tool_call_dict["args"],
+                            id=tool_call_dict.get("id", str(uuid.uuid4())),
+                        )
+                    )
     if content is None:
         content = ""
 
@@ -594,33 +620,215 @@ def _is_event_loop_running() -> bool:
 
 
 class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
-    """`Google Generative AI` Chat models API.
+    """`Google AI` chat models integration.
 
-    To use, you must have either:
+    Instantiation:
+        To use, you must have either:
 
-        1. The ``GOOGLE_API_KEY``` environment variable set with your API key, or
-        2. Pass your API key using the google_api_key kwarg to the ChatGoogle
-           constructor.
+            1. The ``GOOGLE_API_KEY`` environment variable set with your API key, or
+            2. Pass your API key using the google_api_key kwarg to the ChatGoogle
+               constructor.
 
-    Example:
         .. code-block:: python
 
             from langchain_google_genai import ChatGoogleGenerativeAI
-            chat = ChatGoogleGenerativeAI(model="gemini-pro")
-            chat.invoke("Write me a ballad about LangChain")
 
-    """
+            llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
+            llm.invoke("Write me a ballad about LangChain")
+
+    Invoke:
+        .. code-block:: python
+
+            messages = [
+                ("system", "Translate the user sentence to French."),
+                ("human", "I love programming."),
+            ]
+            llm.invoke(messages)
+
+        .. code-block:: python
+
+            AIMessage(
+                content="J'adore programmer. \\n",
+                response_metadata={'prompt_feedback': {'block_reason': 0, 'safety_ratings': []}, 'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]},
+                id='run-56cecc34-2e54-4b52-a974-337e47008ad2-0',
+                usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23}
+            )
+
+    Stream:
+        .. code-block:: python
+
+            for chunk in llm.stream(messages):
+                print(chunk)
+
+        .. code-block:: python
+
+            AIMessageChunk(content='J', response_metadata={'finish_reason': 'STOP', 'safety_ratings': []}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 1, 'total_tokens': 19})
+            AIMessageChunk(content="'adore programmer. \n", response_metadata={'finish_reason': 'STOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]}, id='run-e905f4f4-58cb-4a10-a960-448a2bb649e3', usage_metadata={'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23})
+
+        .. code-block:: python
+
+            stream = llm.stream(messages)
+            full = next(stream)
+            for chunk in stream:
+                full += chunk
+            full
+
+        .. code-block:: python
+
+            AIMessageChunk(
+                content="J'adore programmer. \\n",
+                response_metadata={'finish_reason': 'STOPSTOP', 'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]},
+                id='run-3ce13a42-cd30-4ad7-a684-f1f0b37cdeec',
+                usage_metadata={'input_tokens': 36, 'output_tokens': 6, 'total_tokens': 42}
+            )
+
+    Async:
+        .. code-block:: python
+
+            await llm.ainvoke(messages)
+
+            # stream:
+            # async for chunk in (await llm.astream(messages))
+
+            # batch:
+            # await llm.abatch([messages])
+
+    Tool calling:
+        .. code-block:: python
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+
+            class GetWeather(BaseModel):
+                '''Get the current weather in a given location'''
+
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )
+
+
+            class GetPopulation(BaseModel):
+                '''Get the current population in a given location'''
+
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )
+
+
+            llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
+            ai_msg = llm_with_tools.invoke(
+                "Which city is hotter today and which is bigger: LA or NY?"
+            )
+            ai_msg.tool_calls
+
+        .. code-block:: python
+
+            [{'name': 'GetWeather',
+              'args': {'location': 'Los Angeles, CA'},
+              'id': 'c186c99f-f137-4d52-947f-9e3deabba6f6'},
+             {'name': 'GetWeather',
+              'args': {'location': 'New York City, NY'},
+              'id': 'cebd4a5d-e800-4fa5-babd-4aa286af4f31'},
+             {'name': 'GetPopulation',
+              'args': {'location': 'Los Angeles, CA'},
+              'id': '4f92d897-f5e4-4d34-a3bc-93062c92591e'},
+             {'name': 'GetPopulation',
+              'args': {'location': 'New York City, NY'},
+              'id': '634582de-5186-4e4b-968b-f192f0a93678'}]
+
+    Structured output:
+        .. code-block:: python
+
+            from typing import Optional
+
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+
+            class Joke(BaseModel):
+                '''Joke to tell user.'''
+
+                setup: str = Field(description="The setup of the joke")
+                punchline: str = Field(description="The punchline to the joke")
+                rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")
 
-    client: Any  #: :meta private:
-    async_client: Any  #: :meta private:
+
+            structured_llm = llm.with_structured_output(Joke)
+            structured_llm.invoke("Tell me a joke about cats")
+
+        .. code-block:: python
+
+            Joke(
+                setup='Why are cats so good at video games?',
+                punchline='They have nine lives on the internet',
+                rating=None
+            )
+
+    Image input:
+        .. code-block:: python
+
+            import base64
+            import httpx
+            from langchain_core.messages import HumanMessage
+
+            image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
+            image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")
+            message = HumanMessage(
+                content=[
+                    {"type": "text", "text": "describe the weather in this image"},
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
+                    },
+                ]
+            )
+            ai_msg = llm.invoke([message])
+            ai_msg.content
+
+        .. code-block:: python
+
+            'The weather in this image appears to be sunny and pleasant. The sky is a bright blue with scattered white clouds, suggesting fair weather. The lush green grass and trees indicate a warm and possibly slightly breezy day. There are no signs of rain or storms. \n'
+
+    Token usage:
+        .. code-block:: python
+
+            ai_msg = llm.invoke(messages)
+            ai_msg.usage_metadata
+
+        .. code-block:: python
+
+            {'input_tokens': 18, 'output_tokens': 5, 'total_tokens': 23}
+
+
+    Response metadata:
+        .. code-block:: python
+
+            ai_msg = llm.invoke(messages)
+            ai_msg.response_metadata
+
+        .. code-block:: python
+
+            {
+                'prompt_feedback': {'block_reason': 0, 'safety_ratings': []},
+                'finish_reason': 'STOP',
+                'safety_ratings': [{'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability': 'NEGLIGIBLE', 'blocked': False}]
+            }
+
+    """  # noqa: E501
+
+    client: Any = Field(default=None, exclude=True)  #: :meta private:
+    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
+    google_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
+    """Google AI API key.
+
+    If not specified will be read from env var ``GOOGLE_API_KEY``."""
     default_metadata: Sequence[Tuple[str, str]] = Field(
         default_factory=list
     )  #: :meta private:
 
     convert_system_message_to_human: bool = False
     """Whether to merge any leading SystemMessage into the following HumanMessage.
-
-    Gemini does not support system messages; any unsupported messages will
+
+    Gemini does not support system messages; any unsupported messages will
     raise an error."""
 
     class Config:
@@ -786,9 +994,18 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         **kwargs: Any,
     ) -> ChatResult:
         if not self.async_client:
-            raise RuntimeError(
-                "Initialize ChatGoogleGenerativeAI with a running event loop "
-                "to use async methods."
+            updated_kwargs = {
+                **kwargs,
+                **{
+                    "tools": tools,
+                    "functions": functions,
+                    "safety_settings": safety_settings,
+                    "tool_config": tool_config,
+                    "generation_config": generation_config,
+                },
+            }
+            return await super()._agenerate(
+                messages, stop, run_manager, **updated_kwargs
             )
 
         request = self._prepare_request(
@@ -857,27 +1074,43 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         generation_config: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
-        request = self._prepare_request(
-            messages,
-            stop=stop,
-            tools=tools,
-            functions=functions,
-            safety_settings=safety_settings,
-            tool_config=tool_config,
-            generation_config=generation_config,
-        )
-        async for chunk in await _achat_with_retry(
-            request=request,
-            generation_method=self.async_client.stream_generate_content,
-            **kwargs,
-            metadata=self.default_metadata,
-        ):
-            _chat_result = _response_to_result(chunk, stream=True)
-            gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+        if not self.async_client:
+            updated_kwargs = {
+                **kwargs,
+                **{
+                    "tools": tools,
+                    "functions": functions,
+                    "safety_settings": safety_settings,
+                    "tool_config": tool_config,
+                    "generation_config": generation_config,
+                },
+            }
+            async for value in super()._astream(
+                messages, stop, run_manager, **updated_kwargs
+            ):
+                yield value
+        else:
+            request = self._prepare_request(
+                messages,
+                stop=stop,
+                tools=tools,
+                functions=functions,
+                safety_settings=safety_settings,
+                tool_config=tool_config,
+                generation_config=generation_config,
+            )
+            async for chunk in await _achat_with_retry(
+                request=request,
+                generation_method=self.async_client.stream_generate_content,
+                **kwargs,
+                metadata=self.default_metadata,
+            ):
+                _chat_result = _response_to_result(chunk, stream=True)
+                gen = cast(ChatGenerationChunk, _chat_result.generations[0])
 
-            if run_manager:
-                await run_manager.on_llm_new_token(gen.text)
-            yield gen
+                if run_manager:
+                    await run_manager.on_llm_new_token(gen.text)
+                yield gen
 
     def _prepare_request(
         self,
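Reviewer note: the two hunks above change async behavior. If the model was constructed outside a running event loop, async_client is None; 1.0.7 raised a RuntimeError, while 1.0.9 falls back to BaseChatModel's default _agenerate/_astream with the Gemini-specific kwargs threaded through. A minimal sketch (assumes GOOGLE_API_KEY is set):

    import asyncio

    from langchain_google_genai import ChatGoogleGenerativeAI

    # Constructed with no event loop running, so no async client is created.
    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")

    async def main() -> None:
        # 1.0.7: RuntimeError. 1.0.9: delegates to the base implementation.
        print((await llm.ainvoke("Say hi")).content)

    asyncio.run(main())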
@@ -892,9 +1125,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> Tuple[GenerateContentRequest, Dict[str, Any]]:
         formatted_tools = None
         if tools:
-            formatted_tools = [
-                convert_to_genai_function_declarations(tool) for tool in tools
-            ]
+            formatted_tools = [convert_to_genai_function_declarations(tools)]
         elif functions:
             formatted_tools = [convert_to_genai_function_declarations(functions)]
 
@@ -953,13 +1184,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
         if kwargs:
             raise ValueError(f"Received unsupported arguments {kwargs}")
-        if isinstance(schema, type) and issubclass(schema, BaseModel):
+        if isinstance(schema, type) and is_basemodel_subclass(schema):
             parser: OutputParserLike = PydanticToolsParser(
                 tools=[schema], first_tool_only=True
             )
         else:
             parser = JsonOutputToolsParser()
-        llm = self.bind_tools([schema], tool_choice=False)
+        tool_choice = _get_tool_name(schema) if self._supports_tool_choice else None
+        llm = self.bind_tools([schema], tool_choice=tool_choice)
         if include_raw:
             parser_with_fallback = RunnablePassthrough.assign(
                 parsed=itemgetter("raw") | parser, parsing_error=lambda _: None
@@ -997,9 +1229,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 f"both:\n\n{tool_choice=}\n\n{tool_config=}"
             )
         # Bind dicts for easier serialization/deserialization.
-        genai_tools = [
-            tool_to_dict(convert_to_genai_function_declarations(tool)) for tool in tools
-        ]
+        genai_tools = [tool_to_dict(convert_to_genai_function_declarations(tools))]
         if tool_choice:
             all_names = [
                 f["name"]  # type: ignore[index]
@@ -1007,5 +1237,15 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
                 for f in t["function_declarations"]
             ]
             tool_config = _tool_choice_to_tool_config(tool_choice, all_names)
-
         return self.bind(tools=genai_tools, tool_config=tool_config, **kwargs)
+
+    @property
+    def _supports_tool_choice(self) -> bool:
+        return "gemini-1.5-pro" in self.model
+
+
+def _get_tool_name(
+    tool: Union[ToolDict, GoogleTool],
+) -> str:
+    genai_tool = tool_to_dict(convert_to_genai_function_declarations([tool]))
+    return [f["name"] for f in genai_tool["function_declarations"]][0]  # type: ignore[index]
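Reviewer note: the last three hunks work together. bind_tools now converts the whole tool list in one call, and with_structured_output forces the tool via tool_choice, but only where _supports_tool_choice is true (model names containing "gemini-1.5-pro"). A hedged sketch:

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_google_genai import ChatGoogleGenerativeAI

    class Joke(BaseModel):
        """Joke to tell user."""

        setup: str = Field(description="The setup of the joke")
        punchline: str = Field(description="The punchline to the joke")

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
    # On gemini-1.5-pro this now binds tool_choice="Joke" (function-calling
    # mode ANY, restricted to that one declaration); 1.0.7 passed
    # tool_choice=False unconditionally.
    structured_llm = llm.with_structured_output(Joke)
    # structured_llm.invoke("Tell me a joke about cats")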
{langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/embeddings.py
@@ -39,20 +39,20 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         embeddings.embed_query("What's our Q1 revenue?")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model: str = Field(
         ...,
         description="The name of the embedding model to use. "
         "Example: models/embedding-001",
     )
     task_type: Optional[str] = Field(
-        None,
+        default=None,
         description="The task type. Valid options include: "
         "task_type_unspecified, retrieval_query, retrieval_document, "
         "semantic_similarity, classification, and clustering",
     )
     google_api_key: Optional[SecretStr] = Field(
-        None,
+        default=None,
         description="The Google API key to use. If not provided, "
         "the GOOGLE_API_KEY environment variable will be used.",
     )
@@ -64,18 +64,18 @@ class GoogleGenerativeAIEmbeddings(BaseModel, Embeddings):
         "provided, credentials will be ascertained from the GOOGLE_API_KEY envvar",
     )
     client_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description=(
             "A dictionary of client options to pass to the Google API client, "
             "such as `api_endpoint`."
         ),
     )
     transport: Optional[str] = Field(
-        None,
+        default=None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
     request_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description="A dictionary of request options to pass to the Google API client."
         "Example: `{'timeout': 10}`",
     )
{langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/langchain_google_genai/llms.py
@@ -149,18 +149,18 @@ Supported examples:
     """The maximum number of seconds to wait for a response."""
 
     client_options: Optional[Dict] = Field(
-        None,
+        default=None,
         description=(
             "A dictionary of client options to pass to the Google API client, "
             "such as `api_endpoint`."
         ),
     )
     transport: Optional[str] = Field(
-        None,
+        default=None,
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )
     additional_headers: Optional[Dict[str, str]] = Field(
-        None,
+        default=None,
         description=(
             "A key-value dictionary representing additional headers for the model call"
         ),
@@ -212,7 +212,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         llm = GoogleGenerativeAI(model="gemini-pro")
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
 
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
{langchain_google_genai-1.0.7 → langchain_google_genai-1.0.9}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "1.0.7"
+version = "1.0.9"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -12,7 +12,7 @@ license = "MIT"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = ">=0.2.9,<0.3"
+langchain-core = ">=0.2.32,<0.3"
 google-generativeai = "^0.7.0"
 pillow = { version = "^10.1.0", optional = true }
 
@@ -31,6 +31,7 @@ pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
 numpy = "^1.26.2"
 langchain-core = { git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/core" }
+langchain-standard-tests = { git = "https://github.com/langchain-ai/langchain.git", subdirectory = "libs/standard-tests" }
 
 [tool.codespell]
 ignore-words-list = "rouge"
@@ -1,340 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from typing import (
4
- Any,
5
- Callable,
6
- Collection,
7
- Dict,
8
- List,
9
- Literal,
10
- Optional,
11
- Sequence,
12
- Type,
13
- TypedDict,
14
- Union,
15
- cast,
16
- )
17
-
18
- import google.ai.generativelanguage as glm
19
- from google.ai.generativelanguage import FunctionCallingConfig, FunctionDeclaration
20
- from google.ai.generativelanguage import Tool as GoogleTool
21
- from google.generativeai.types.content_types import ToolDict # type: ignore[import]
22
- from langchain_core.pydantic_v1 import BaseModel
23
- from langchain_core.tools import BaseTool
24
- from langchain_core.tools import tool as callable_as_lc_tool
25
- from langchain_core.utils.json_schema import dereference_refs
26
-
27
- TYPE_ENUM = {
28
- "string": glm.Type.STRING,
29
- "number": glm.Type.NUMBER,
30
- "integer": glm.Type.INTEGER,
31
- "boolean": glm.Type.BOOLEAN,
32
- "array": glm.Type.ARRAY,
33
- "object": glm.Type.OBJECT,
34
- }
35
-
36
- TYPE_ENUM_REVERSE = {v: k for k, v in TYPE_ENUM.items()}
37
-
38
-
39
- class _ToolDictLike(TypedDict):
40
- function_declarations: _FunctionDeclarationLikeList
41
-
42
-
43
- class _FunctionDeclarationDict(TypedDict):
44
- name: str
45
- description: str
46
- parameters: Dict[str, Collection[str]]
47
-
48
-
49
- class _ToolDict(TypedDict):
50
- function_declarations: Sequence[_FunctionDeclarationDict]
51
-
52
-
53
- # Info: This is a FunctionDeclaration(=fc).
54
- _FunctionDeclarationLike = Union[
55
- BaseTool, Type[BaseModel], FunctionDeclaration, Callable, Dict[str, Any]
56
- ]
57
-
58
- # Info: This mean one tool.
59
- _FunctionDeclarationLikeList = Sequence[_FunctionDeclarationLike]
60
-
61
-
62
- # Info: This means one tool=Sequence of FunctionDeclaration
63
- # The dict should be GoogleTool like. {"function_declarations": [ { "name": ...}.
64
- # OpenAI like dict is not be accepted. {{'type': 'function', 'function': {'name': ...}
65
- _ToolsType = Union[
66
- GoogleTool,
67
- ToolDict,
68
- _ToolDictLike,
69
- _FunctionDeclarationLikeList,
70
- _FunctionDeclarationLike,
71
- ]
72
-
73
-
74
- #
75
- # Info: GoogleTool means function_declarations and proto.Message.
76
- def convert_to_genai_function_declarations(
77
- tool: _ToolsType,
78
- ) -> GoogleTool:
79
- if isinstance(tool, list):
80
- # multiple _FunctionDeclarationLike
81
- return GoogleTool(
82
- function_declarations=_convert_fc_likes_to_genai_function(tool)
83
- )
84
- elif isinstance(tool, (BaseTool, FunctionDeclaration)):
85
- # single _FunctionDeclarationLike
86
- return GoogleTool(
87
- function_declarations=[_convert_fc_like_to_genai_function(tool)]
88
- )
89
- elif isinstance(tool, type) and issubclass(tool, BaseModel):
90
- # single _FunctionDeclarationLike
91
- return GoogleTool(
92
- function_declarations=[_convert_fc_like_to_genai_function(tool)]
93
- )
94
- elif isinstance(tool, GoogleTool):
95
- return cast(GoogleTool, tool)
96
- elif callable(tool):
97
- return GoogleTool(
98
- function_declarations=[
99
- _convert_tool_to_genai_function(callable_as_lc_tool()(tool))
100
- ]
101
- )
102
- elif isinstance(tool, dict):
103
- return GoogleTool(function_declarations=_convert_dict_to_genai_functions(tool)) # type: ignore
104
- else:
105
- raise ValueError(f"Unsupported tool type {tool}")
106
-
107
-
108
- def tool_to_dict(tool: GoogleTool) -> _ToolDict:
109
- function_declarations = []
110
- for function_declaration_proto in tool.function_declarations:
111
- properties: Dict[str, Any] = {}
112
- for property in function_declaration_proto.parameters.properties:
113
- property_type = function_declaration_proto.parameters.properties[
114
- property
115
- ].type
116
- property_dict = {"type": TYPE_ENUM_REVERSE[property_type]}
117
- property_description = function_declaration_proto.parameters.properties[
118
- property
119
- ].description
120
- if property_description:
121
- property_dict["description"] = property_description
122
- properties[property] = property_dict
123
- name = function_declaration_proto.name
124
- description = function_declaration_proto.description
125
- parameters = {"type": "object", "properties": properties}
126
- if function_declaration_proto.parameters.required:
127
- parameters["required"] = function_declaration_proto.parameters.required
128
- function_declaration = _FunctionDeclarationDict(
129
- name=name, description=description, parameters=parameters
130
- )
131
- function_declarations.append(function_declaration)
132
- return {"function_declarations": function_declarations}
133
-
134
-
135
- def _convert_fc_likes_to_genai_function(
136
- fc_likes: _FunctionDeclarationLikeList,
137
- ) -> Sequence[FunctionDeclaration]:
138
- if isinstance(fc_likes, list):
139
- return [_convert_fc_like_to_genai_function(fc) for fc in fc_likes]
140
- raise ValueError(f"Unsupported fc_likes type {fc_likes}")
141
-
142
-
143
- def _convert_fc_like_to_genai_function(
144
- fc_like: _FunctionDeclarationLike,
145
- ) -> FunctionDeclaration:
146
- if isinstance(fc_like, BaseTool):
147
- return _convert_tool_to_genai_function(fc_like)
148
- elif isinstance(fc_like, type) and issubclass(fc_like, BaseModel):
149
- return _convert_pydantic_to_genai_function(fc_like)
150
- elif isinstance(fc_like, dict):
151
- # TODO: add declaration_index
152
- return _convert_dict_to_genai_function(fc_like)
153
- elif callable(fc_like):
154
- return _convert_tool_to_genai_function(callable_as_lc_tool()(fc_like))
155
- else:
156
- raise ValueError(f"Unsupported fc_like type {fc_like}")
157
-
158
-
159
- def _convert_tool_dict_to_genai_functions(
160
- tool_dict: _ToolDictLike,
161
- ) -> Sequence[FunctionDeclaration]:
162
- if "function_declarations" in tool_dict:
163
- return _convert_dicts_to_genai_functions(tool_dict["function_declarations"]) # type: ignore
164
- else:
165
- raise ValueError(f"Unsupported function tool_dict type {tool_dict}")
166
-
167
-
168
- def _convert_dict_to_genai_functions(
169
- function_declarations_dict: Dict[str, Any],
170
- ) -> Sequence[FunctionDeclaration]:
171
- if "function_declarations" in function_declarations_dict:
172
- # GoogleTool like
173
- return [
174
- _convert_dict_to_genai_function(fc, i)
175
- for i, fc in enumerate(function_declarations_dict["function_declarations"])
176
- ]
177
- d = function_declarations_dict
178
- if "name" in d and "description" in d and "parameters" in d:
179
- # _FunctionDeclarationDict
180
- return [_convert_dict_to_genai_function(d)]
181
- else:
182
- # OpenAI like?
183
- raise ValueError(f"Unsupported function call type {function_declarations_dict}")
184
-
185
-
186
- def _convert_dicts_to_genai_functions(
187
- function_declaration_dicts: Sequence[Dict[str, Any]],
188
- ) -> Sequence[FunctionDeclaration]:
189
- return [
190
- _convert_dict_to_genai_function(function_declaration_dict, i)
191
- for i, function_declaration_dict in enumerate(function_declaration_dicts)
192
- ]
193
-
194
-
195
- def _convert_dict_to_genai_function(
196
- function_declaration_dict: Dict[str, Any], declaration_index: int = 0
197
- ) -> FunctionDeclaration:
198
- formatted_fc = {
199
- "name": function_declaration_dict.get("name", f"unknown-{declaration_index}"),
200
- "description": function_declaration_dict.get("description", "no-description"),
201
- }
202
- if "parameters" in function_declaration_dict:
203
- formatted_fc["parameters"] = {
204
- "properties": {
205
- k: {
206
- "type_": TYPE_ENUM[v["type"]],
207
- "description": v.get("description"),
208
- }
209
- for k, v in function_declaration_dict["parameters"][
210
- "properties"
211
- ].items()
212
- },
213
- "required": function_declaration_dict.get("parameters", []).get(
214
- "required", []
215
- ),
216
- "type_": TYPE_ENUM[function_declaration_dict["parameters"]["type"]],
217
- }
218
- return FunctionDeclaration(**formatted_fc)
219
-
220
-
221
- def _convert_tool_to_genai_function(tool: BaseTool) -> FunctionDeclaration:
222
- if tool.args_schema:
223
- fc = tool.args_schema
224
- if isinstance(fc, type) and issubclass(fc, BaseModel):
225
- return _convert_pydantic_to_genai_function(
226
- fc, tool_name=tool.name, tool_description=tool.description
227
- )
228
- raise ValueError(f"Unsupported function call type {fc}")
229
- else:
230
- return FunctionDeclaration(
231
- name=tool.name,
232
- description=tool.description,
233
- parameters={
234
- "properties": {
235
- "__arg1": {"type_": TYPE_ENUM["string"]},
236
- },
237
- "required": ["__arg1"],
238
- "type_": TYPE_ENUM["object"],
239
- },
240
- )
241
-
242
-
243
- def _convert_pydantic_to_genai_function(
244
- pydantic_model: Type[BaseModel],
245
- tool_name: Optional[str] = None,
246
- tool_description: Optional[str] = None,
247
- ) -> FunctionDeclaration:
248
- schema = dereference_refs(pydantic_model.schema())
249
- schema.pop("definitions", None)
250
- function_declaration = FunctionDeclaration(
251
- name=tool_name if tool_name else schema.get("title"),
252
- description=tool_description if tool_description else schema.get("description"),
253
- parameters={
254
- "properties": {
255
- k: {
256
- "type_": _get_type_from_schema(v),
257
- "description": v.get("description"),
258
- }
259
- for k, v in schema["properties"].items()
260
- },
261
- "required": schema.get("required", []),
262
- "type_": TYPE_ENUM[schema["type"]],
263
- },
264
- )
265
- return function_declaration
266
-
267
-
268
- def _get_type_from_schema(schema: Dict[str, Any]) -> int:
269
- if "anyOf" in schema:
270
- types = [_get_type_from_schema(sub_schema) for sub_schema in schema["anyOf"]]
271
- types = [t for t in types if t is not None] # Remove None values
272
- if types:
273
- return types[-1] # TODO: update FunctionDeclaration and pass all types?
274
- else:
275
- pass
276
- elif "type" in schema:
277
- stype = str(schema["type"])
278
- if stype in TYPE_ENUM:
279
- return TYPE_ENUM[stype]
280
- else:
281
- pass
282
- else:
283
- pass
284
- return TYPE_ENUM["string"] # Default to string if no valid types found
285
-
286
-
287
- _ToolChoiceType = Union[
288
- dict, List[str], str, Literal["auto", "none", "any"], Literal[True]
289
- ]
290
-
291
-
292
- class _FunctionCallingConfigDict(TypedDict):
293
- mode: Union[FunctionCallingConfig.Mode, str]
294
- allowed_function_names: Optional[List[str]]
295
-
296
-
297
- class _ToolConfigDict(TypedDict):
298
- function_calling_config: _FunctionCallingConfigDict
299
-
300
-
301
- def _tool_choice_to_tool_config(
302
- tool_choice: _ToolChoiceType,
303
- all_names: List[str],
304
- ) -> _ToolConfigDict:
305
- allowed_function_names: Optional[List[str]] = None
306
- if tool_choice is True or tool_choice == "any":
307
- mode = "any"
308
- allowed_function_names = all_names
309
- elif tool_choice == "auto":
310
- mode = "auto"
311
- elif tool_choice == "none":
312
- mode = "none"
313
- elif isinstance(tool_choice, str):
314
- mode = "any"
315
- allowed_function_names = [tool_choice]
316
- elif isinstance(tool_choice, list):
317
- mode = "any"
318
- allowed_function_names = tool_choice
319
- elif isinstance(tool_choice, dict):
320
- if "mode" in tool_choice:
321
- mode = tool_choice["mode"]
322
- allowed_function_names = tool_choice.get("allowed_function_names")
323
- elif "function_calling_config" in tool_choice:
324
- mode = tool_choice["function_calling_config"]["mode"]
325
- allowed_function_names = tool_choice["function_calling_config"].get(
326
- "allowed_function_names"
327
- )
328
- else:
329
- raise ValueError(
330
- f"Unrecognized tool choice format:\n\n{tool_choice=}\n\nShould match "
331
- f"Google GenerativeAI ToolConfig or FunctionCallingConfig format."
332
- )
333
- else:
334
- raise ValueError(f"Unrecognized tool choice format:\n\n{tool_choice=}")
335
- return _ToolConfigDict(
336
- function_calling_config={
337
- "mode": mode,
338
- "allowed_function_names": allowed_function_names,
339
- }
340
- )
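Reviewer note: one behavioral difference between the two _tool_choice_to_tool_config implementations is visible right in the diff: 1.0.7 emitted lowercase modes ("any"/"auto"/"none"), while 1.0.9 uppercases them to match the gapic FunctionCallingConfig.Mode names. A quick check against 1.0.9 (private helper, imported only to illustrate):

    from langchain_google_genai._function_utils import _tool_choice_to_tool_config

    config = _tool_choice_to_tool_config("any", ["GetWeather"])
    assert config["function_calling_config"]["mode"] == "ANY"  # was "any" in 1.0.7
    assert config["function_calling_config"]["allowed_function_names"] == ["GetWeather"]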