langchain-google-genai 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain_google_genai/_function_utils.py

@@ -7,6 +7,7 @@ from typing import (
     Union,
 )
 
+import google.ai.generativelanguage as glm
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.tools import BaseTool
 from langchain_core.utils.json_schema import dereference_refs
@@ -14,55 +15,36 @@ from langchain_core.utils.json_schema import dereference_refs
 
 FunctionCallType = Union[BaseTool, Type[BaseModel], Dict]
 
 TYPE_ENUM = {
-    "string": 1,
-    "number": 2,
-    "integer": 3,
-    "boolean": 4,
-    "array": 5,
-    "object": 6,
+    "string": glm.Type.STRING,
+    "number": glm.Type.NUMBER,
+    "integer": glm.Type.INTEGER,
+    "boolean": glm.Type.BOOLEAN,
+    "array": glm.Type.ARRAY,
+    "object": glm.Type.OBJECT,
 }
 
 
 def convert_to_genai_function_declarations(
     function_calls: List[FunctionCallType],
-) -> Dict:
-    function_declarations = []
-    for fc in function_calls:
-        function_declarations.append(_convert_to_genai_function(fc))
-    return {
-        "function_declarations": function_declarations,
-    }
+) -> List[glm.Tool]:
+    return [
+        glm.Tool(
+            function_declarations=[_convert_to_genai_function(fc)],
+        )
+        for fc in function_calls
+    ]
 
 
-def _convert_to_genai_function(fc: FunctionCallType) -> Dict:
-    """
-    Produce
-
-    {
-      "name": "get_weather",
-      "description": "Determine weather in my location",
-      "parameters": {
-        "properties": {
-          "location": {
-            "description": "The city and state e.g. San Francisco, CA",
-            "type_": 1
-          },
-          "unit": { "enum": ["c", "f"], "type_": 1 }
-        },
-        "required": ["location"],
-        "type_": 6
-      }
-    }
-
-    """
+def _convert_to_genai_function(fc: FunctionCallType) -> glm.FunctionDeclaration:
     if isinstance(fc, BaseTool):
         return _convert_tool_to_genai_function(fc)
     elif isinstance(fc, type) and issubclass(fc, BaseModel):
         return _convert_pydantic_to_genai_function(fc)
     elif isinstance(fc, dict):
-        return {
-            **fc,
-            "parameters": {
+        return glm.FunctionDeclaration(
+            name=fc["name"],
+            description=fc.get("description"),
+            parameters={
                 "properties": {
                     k: {
                         "type_": TYPE_ENUM[v["type"]],
@@ -73,20 +55,20 @@ def _convert_to_genai_function(fc: FunctionCallType) -> Dict:
                 "required": fc["parameters"].get("required", []),
                 "type_": TYPE_ENUM[fc["parameters"]["type"]],
             },
-        }
+        )
     else:
         raise ValueError(f"Unsupported function call type {fc}")
 
 
-def _convert_tool_to_genai_function(tool: BaseTool) -> Dict:
+def _convert_tool_to_genai_function(tool: BaseTool) -> glm.FunctionDeclaration:
     if tool.args_schema:
         schema = dereference_refs(tool.args_schema.schema())
         schema.pop("definitions", None)
 
-        return {
-            "name": tool.name or schema["title"],
-            "description": tool.description or schema["description"],
-            "parameters": {
+        return glm.FunctionDeclaration(
+            name=tool.name or schema["title"],
+            description=tool.description or schema["description"],
+            parameters={
                 "properties": {
                     k: {
                         "type_": TYPE_ENUM[v["type"]],
@@ -97,31 +79,30 @@ def _convert_tool_to_genai_function(tool: BaseTool) -> Dict:
                 "required": schema["required"],
                 "type_": TYPE_ENUM[schema["type"]],
             },
-        }
+        )
     else:
-        return {
-            "name": tool.name,
-            "description": tool.description,
-            "parameters": {
+        return glm.FunctionDeclaration(
+            name=tool.name,
+            description=tool.description,
+            parameters={
                 "properties": {
-                    "__arg1": {"type": "string"},
+                    "__arg1": {"type_": TYPE_ENUM["string"]},
                 },
                 "required": ["__arg1"],
                 "type_": TYPE_ENUM["object"],
             },
-        }
+        )
 
 
 def _convert_pydantic_to_genai_function(
     pydantic_model: Type[BaseModel],
-) -> Dict:
+) -> glm.FunctionDeclaration:
     schema = dereference_refs(pydantic_model.schema())
     schema.pop("definitions", None)
-
-    return {
-        "name": schema["title"],
-        "description": schema.get("description", ""),
-        "parameters": {
+    return glm.FunctionDeclaration(
+        name=schema["title"],
+        description=schema.get("description", ""),
+        parameters={
             "properties": {
                 k: {
                     "type_": TYPE_ENUM[v["type"]],
@@ -132,4 +113,4 @@ def _convert_pydantic_to_genai_function(
         "required": schema["required"],
         "type_": TYPE_ENUM[schema["type"]],
         },
-    }
+    )
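Net effect of the `_function_utils.py` changes: `convert_to_genai_function_declarations` now returns a list of `glm.Tool` protos (one per function) instead of a single `{"function_declarations": [...]}` dict. A minimal sketch of the new surface, using a hypothetical `GetWeather` schema:

```python
from langchain_core.pydantic_v1 import BaseModel, Field

# Private module; import path taken from the diff above.
from langchain_google_genai._function_utils import (
    convert_to_genai_function_declarations,
)


class GetWeather(BaseModel):
    """Determine weather in my location."""

    location: str = Field(description="The city and state e.g. San Francisco, CA")


tools = convert_to_genai_function_declarations([GetWeather])
# 0.0.7 returned a plain dict; 0.0.8 returns proto messages, one glm.Tool
# wrapping one glm.FunctionDeclaration per input.
print(tools[0].function_declarations[0].name)  # "GetWeather"
```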
langchain_google_genai/chat_models.py

@@ -16,18 +16,18 @@ from typing import (
     Optional,
     Sequence,
     Tuple,
-    Type,
     Union,
     cast,
 )
 from urllib.parse import urlparse
 
+import google.ai.generativelanguage as glm
 import google.api_core
 
 # TODO: remove ignore once the google package is published with types
 import google.generativeai as genai  # type: ignore[import]
+import proto  # type: ignore[import]
 import requests
-from google.ai.generativelanguage_v1beta import FunctionCall
 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
@@ -37,11 +37,8 @@ from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
     BaseMessage,
-    ChatMessage,
-    ChatMessageChunk,
     FunctionMessage,
     HumanMessage,
-    HumanMessageChunk,
     SystemMessage,
 )
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
@@ -327,15 +324,42 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human
             continue
         elif isinstance(message, AIMessage):
             role = "model"
-            # TODO: Handle AImessage with function call
-            parts = _convert_to_parts(message.content)
+            raw_function_call = message.additional_kwargs.get("function_call")
+            if raw_function_call:
+                function_call = glm.FunctionCall(
+                    {
+                        "name": raw_function_call["name"],
+                        "args": json.loads(raw_function_call["arguments"]),
+                    }
+                )
+                parts = [glm.Part(function_call=function_call)]
+            else:
+                parts = _convert_to_parts(message.content)
         elif isinstance(message, HumanMessage):
             role = "user"
             parts = _convert_to_parts(message.content)
         elif isinstance(message, FunctionMessage):
             role = "user"
-            # TODO: Handle FunctionMessage
-            parts = _convert_to_parts(message.content)
+            response: Any
+            if not isinstance(message.content, str):
+                response = message.content
+            else:
+                try:
+                    response = json.loads(message.content)
+                except json.JSONDecodeError:
+                    response = message.content  # leave as str representation
+            parts = [
+                glm.Part(
+                    function_response=glm.FunctionResponse(
+                        name=message.name,
+                        response=(
+                            {"output": response}
+                            if not isinstance(response, dict)
+                            else response
+                        ),
+                    )
+                )
+            ]
         else:
             raise ValueError(
                 f"Unexpected message with type {type(message)} at the position {i}."
@@ -353,100 +377,51 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human
     return messages
 
 
-def _retrieve_function_call_response(
-    parts: List[genai.types.PartType],
-) -> Optional[Dict]:
-    for idx, part in enumerate(parts):
-        if part.function_call and part.function_call.name:
-            fc: FunctionCall = part.function_call
-            return {
-                "function_call": {
-                    "name": fc.name,
-                    "arguments": json.dumps(
-                        dict(fc.args.items())
-                    ),  # dump to match other function calling llms for now
-                }
-            }
-    return None
-
-
-def _parts_to_content(
-    parts: List[genai.types.PartType],
-) -> Tuple[Union[str, List[Union[Dict[Any, Any], str]]], Optional[Dict]]:
-    """Converts a list of Gemini API Part objects into a list of LangChain messages."""
-    function_call_resp = _retrieve_function_call_response(parts)
-
-    if len(parts) == 1 and parts[0].text is not None and not parts[0].inline_data:
-        # Simple text response. The typical response
-        return parts[0].text, function_call_resp
-    elif not parts:
-        logger.warning("Gemini produced an empty response.")
-        return "", function_call_resp
-    messages: List[Union[Dict[Any, Any], str]] = []
-    for part in parts:
-        if part.text is not None:
-            messages.append(
-                {
-                    "type": "text",
-                    "text": part.text,
-                }
-            )
-        else:
-            # TODO: Handle inline_data if that's a thing?
-            raise ChatGoogleGenerativeAIError(f"Unexpected part type. {part}")
-    return messages, function_call_resp
+def _parse_response_candidate(
+    response_candidate: glm.Candidate, stream: bool
+) -> AIMessage:
+    first_part = response_candidate.content.parts[0]
+    if first_part.function_call:
+        function_call = proto.Message.to_dict(first_part.function_call)
+        function_call["arguments"] = json.dumps(function_call.pop("args", {}))
+        return (AIMessageChunk if stream else AIMessage)(
+            content="", additional_kwargs={"function_call": function_call}
+        )
+    else:
+        parts = response_candidate.content.parts
+
+        if len(parts) == 1 and parts[0].text:
+            content: Union[str, List[Union[str, Dict]]] = parts[0].text
+        else:
+            content = [proto.Message.to_dict(part) for part in parts]
+        return (AIMessageChunk if stream else AIMessage)(
+            content=content, additional_kwargs={}
+        )
 
 
 def _response_to_result(
-    response: genai.types.GenerateContentResponse,
-    ai_msg_t: Type[BaseMessage] = AIMessage,
-    human_msg_t: Type[BaseMessage] = HumanMessage,
-    chat_msg_t: Type[BaseMessage] = ChatMessage,
-    generation_t: Type[ChatGeneration] = ChatGeneration,
+    response: glm.GenerateContentResponse,
+    stream: bool = False,
 ) -> ChatResult:
     """Converts a PaLM API response into a LangChain ChatResult."""
-    llm_output = {}
-    if response.prompt_feedback:
-        try:
-            prompt_feedback = type(response.prompt_feedback).to_dict(
-                response.prompt_feedback, use_integers_for_enums=False
-            )
-            llm_output["prompt_feedback"] = prompt_feedback
-        except Exception as e:
-            logger.debug(f"Unable to convert prompt_feedback to dict: {e}")
+    llm_output = {"prompt_feedback": proto.Message.to_dict(response.prompt_feedback)}
 
     generations: List[ChatGeneration] = []
 
-    role_map = {
-        "model": ai_msg_t,
-        "user": human_msg_t,
-    }
-
     for candidate in response.candidates:
-        content = candidate.content
-        parts_content, additional_kwargs = _parts_to_content(content.parts)
-        if content.role not in role_map:
-            logger.warning(
-                f"Unrecognized role: {content.role}. Treating as a ChatMessage."
-            )
-            msg = chat_msg_t(
-                content=parts_content,
-                role=content.role,
-                additional_kwargs=additional_kwargs or {},
-            )
-        else:
-            msg = role_map[content.role](
-                content=parts_content,
-                additional_kwargs=additional_kwargs or {},
-            )
         generation_info = {}
         if candidate.finish_reason:
             generation_info["finish_reason"] = candidate.finish_reason.name
-        if candidate.safety_ratings:
-            generation_info["safety_ratings"] = [
-                type(rating).to_dict(rating) for rating in candidate.safety_ratings
-            ]
-        generations.append(generation_t(message=msg, generation_info=generation_info))
+        generation_info["safety_ratings"] = [
+            proto.Message.to_dict(safety_rating, use_integers_for_enums=False)
+            for safety_rating in candidate.safety_ratings
+        ]
+        generations.append(
+            (ChatGenerationChunk if stream else ChatGeneration)(
+                message=_parse_response_candidate(candidate, stream=stream),
+                generation_info=generation_info,
+            )
+        )
     if not response.candidates:
         # Likely a "prompt feedback" violation (e.g., toxic input)
         # Raising an error would be different than how OpenAI handles it,
@@ -455,7 +430,12 @@ def _response_to_result(
             "Gemini produced an empty response. Continuing with empty message\n"
             f"Feedback: {response.prompt_feedback}"
         )
-        generations = [generation_t(message=ai_msg_t(content=""), generation_info={})]
+        generations = [
+            (ChatGenerationChunk if stream else ChatGeneration)(
+                message=(AIMessageChunk if stream else AIMessage)(content=""),
+                generation_info={},
+            )
+        ]
     return ChatResult(generations=generations, llm_output=llm_output)
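`_parse_response_candidate` centralizes what `_retrieve_function_call_response` and `_parts_to_content` did separately, and the single `stream` flag replaces the four message/generation type parameters. A sketch of the parsing behavior on a hand-built candidate (assuming the private function is imported from `langchain_google_genai.chat_models`):

```python
import json

import google.ai.generativelanguage as glm
from langchain_google_genai.chat_models import _parse_response_candidate

candidate = glm.Candidate(
    content=glm.Content(
        parts=[
            glm.Part(
                function_call=glm.FunctionCall(
                    name="get_weather", args={"location": "Boston, MA"}
                )
            )
        ]
    )
)
msg = _parse_response_candidate(candidate, stream=False)
# Function-call candidates become an AIMessage with empty content; args are
# re-dumped to a JSON string to match other function-calling LLMs.
assert msg.content == ""
print(json.loads(msg.additional_kwargs["function_call"]["arguments"]))
# {'location': 'Boston, MA'}
```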
@@ -616,17 +596,11 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             stream=True,
         )
         for chunk in response:
-            _chat_result = _response_to_result(
-                chunk,
-                ai_msg_t=AIMessageChunk,
-                human_msg_t=HumanMessageChunk,
-                chat_msg_t=ChatMessageChunk,
-                generation_t=ChatGenerationChunk,
-            )
+            _chat_result = _response_to_result(chunk, stream=True)
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
-            yield gen
             if run_manager:
                 run_manager.on_llm_new_token(gen.text)
+            yield gen
 
     async def _astream(
         self,
@@ -646,17 +620,11 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             generation_method=chat.send_message_async,
             stream=True,
         ):
-            _chat_result = _response_to_result(
-                chunk,
-                ai_msg_t=AIMessageChunk,
-                human_msg_t=HumanMessageChunk,
-                chat_msg_t=ChatMessageChunk,
-                generation_t=ChatGenerationChunk,
-            )
+            _chat_result = _response_to_result(chunk, stream=True)
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
-            yield gen
             if run_manager:
                 await run_manager.on_llm_new_token(gen.text)
+            yield gen
 
     def _prepare_chat(
         self,
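Besides collapsing the four `*_t` parameters into `stream=True`, both loops reorder the callback and the yield: `on_llm_new_token` now fires before each chunk is handed to downstream consumers. Typical consumption is unchanged (a sketch; assumes `GOOGLE_API_KEY` is set and the model name is illustrative):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-pro")
for chunk in llm.stream("Say hi in five words."):
    # Each chunk is the AIMessageChunk carried by a ChatGenerationChunk.
    print(chunk.content, end="", flush=True)
```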
langchain_google_genai/llms.py

@@ -151,6 +151,18 @@ Supported examples:
     def _model_family(self) -> str:
         return GoogleModelFamily(self.model)
 
+    @property
+    def _identifying_params(self) -> Dict[str, Any]:
+        """Get the identifying parameters."""
+        return {
+            "model": self.model,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+            "max_output_tokens": self.max_output_tokens,
+            "candidate_count": self.n,
+        }
+
 
 class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
     """Google GenerativeAI models.
langchain_google_genai-0.0.8.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 0.0.7
+Version: 0.0.8
 Summary: An integration package connecting Google's genai package and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
langchain_google_genai-0.0.8.dist-info/RECORD (added)

@@ -0,0 +1,11 @@
+langchain_google_genai/__init__.py,sha256=cDMb1xbsenQtYBACNP0dYPwA7Rt015MT7HC_XP3X-4Y,2304
+langchain_google_genai/_common.py,sha256=1r0VrrBSTZfGprmICZ5OV-W5SK31jKRFFCNE3vJ3jmk,136
+langchain_google_genai/_function_utils.py,sha256=9IVMPQq5lQB8F_whG3mrGOM_tjmP-TMEY3URHfJnjgI,3640
+langchain_google_genai/chat_models.py,sha256=flq1xYC2OYoijOtU9qJy12CZwJ8sdY3va3x0004pl1M,23208
+langchain_google_genai/embeddings.py,sha256=EMa-sDGXUpAPMSyjA2-YXF_TGrlSlqljNeqysAh574s,3951
+langchain_google_genai/llms.py,sha256=Kk7fCrWbbfR1tpiFRYBX6fgEhgAYq7HQVSNvR6UvWqY,10990
+langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_google_genai-0.0.8.dist-info/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
+langchain_google_genai-0.0.8.dist-info/METADATA,sha256=mwV8mJuT-jVGcq-HQa7IiWjd6HDvVOIVa01lSTe1R-A,2851
+langchain_google_genai-0.0.8.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain_google_genai-0.0.8.dist-info/RECORD,,

langchain_google_genai-0.0.7.dist-info/RECORD (removed)

@@ -1,11 +0,0 @@
-langchain_google_genai/__init__.py,sha256=cDMb1xbsenQtYBACNP0dYPwA7Rt015MT7HC_XP3X-4Y,2304
-langchain_google_genai/_common.py,sha256=1r0VrrBSTZfGprmICZ5OV-W5SK31jKRFFCNE3vJ3jmk,136
-langchain_google_genai/_function_utils.py,sha256=aCLchIQE2SJCQu9lv5qSsSRJiFeQPsCRZHfibwg4_No,3810
-langchain_google_genai/chat_models.py,sha256=fvXsT2wLuaQkwxZ0EcprMkYkp7aEXNMcdDQJwUimjkk,24263
-langchain_google_genai/embeddings.py,sha256=EMa-sDGXUpAPMSyjA2-YXF_TGrlSlqljNeqysAh574s,3951
-langchain_google_genai/llms.py,sha256=BjP9hTBVMu5rb6FQRzK9qnDoBeRqMpDUT1sBSOyJMhA,10609
-langchain_google_genai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_google_genai-0.0.7.dist-info/LICENSE,sha256=DppmdYJVSc1jd0aio6ptnMUn5tIHrdAhQ12SclEBfBg,1072
-langchain_google_genai-0.0.7.dist-info/METADATA,sha256=ki5KGByrSZsLg3ZMqoVPLQXhcEbKksxioZk1AJyw4YQ,2851
-langchain_google_genai-0.0.7.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langchain_google_genai-0.0.7.dist-info/RECORD,,