langchain-google-genai 0.0.3__tar.gz → 0.0.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain-google-genai
- Version: 0.0.3
+ Version: 0.0.5
  Summary: An integration package connecting Google's genai package and LangChain
  Home-page: https://github.com/langchain-ai/langchain/blob/master/libs/partners/google-genai
  Requires-Python: >=3.9,<4.0
@@ -8,8 +8,10 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Provides-Extra: images
  Requires-Dist: google-generativeai (>=0.3.1,<0.4.0)
  Requires-Dist: langchain-core (>=0.1,<0.2)
+ Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
  Project-URL: Repository, https://github.com/langchain-ai/langchain/blob/master/libs/partners/google-genai
  Description-Content-Type: text/markdown

@@ -19,10 +21,17 @@ This package contains the LangChain integrations for Gemini through their genera

  ## Installation

- ```python
+ ```bash
  pip install -U langchain-google-genai
  ```

+ ### Image utilities
+ To use image utility methods, like loading images from GCS urls, install with extras group 'images':
+
+ ```bash
+ pip install -e "langchain-google-genai[images]"
+ ```
+
  ## Chat Models

  This package contains the `ChatGoogleGenerativeAI` class, which is the recommended way to interface with the Google Gemini series of models.
@@ -4,10 +4,17 @@ This package contains the LangChain integrations for Gemini through their genera

  ## Installation

- ```python
+ ```bash
  pip install -U langchain-google-genai
  ```

+ ### Image utilities
+ To use image utility methods, like loading images from GCS urls, install with extras group 'images':
+
+ ```bash
+ pip install -e "langchain-google-genai[images]"
+ ```
+
  ## Chat Models

  This package contains the `ChatGoogleGenerativeAI` class, which is the recommended way to interface with the Google Gemini series of models.
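The optional `images` extra exists so that image parts referenced by URL, GCS URI, or local path can be loaded via pillow. As a rough, hypothetical sketch of the multimodal usage this enables (the model name and bucket path below are placeholders, not taken from the diff):

```python
# Hypothetical usage sketch for the "images" extra; assumes pillow is installed
# and GOOGLE_API_KEY is set. The GCS path is a placeholder.
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        # Non-inline sources (URLs, GCS URIs, local paths) are what require pillow.
        {"type": "image_url", "image_url": "gs://example-bucket/example.png"},
    ]
)
llm.invoke([message])
```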
@@ -6,11 +6,14 @@ This module integrates Google's Generative AI models, specifically the Gemini se

  The `ChatGoogleGenerativeAI` class is the primary interface for interacting with Google's Gemini chat models. It allows users to send and receive messages using a specified Gemini model, suitable for various conversational AI applications.

+ **LLMs**
+
+ The `GoogleGenerativeAI` class is the primary interface for interacting with Google's Gemini LLMs. It allows users to generate text using a specified Gemini model.
+
  **Embeddings**

  The `GoogleGenerativeAIEmbeddings` class provides functionalities to generate embeddings using Google's models.
  These embeddings can be used for a range of NLP tasks, including semantic analysis, similarity comparisons, and more.
-
  **Installation**

  To install the package, use pip:
@@ -29,6 +32,17 @@ llm = ChatGoogleGenerativeAI(model="gemini-pro")
  llm.invoke("Sing a ballad of LangChain.")
  ```

+ ## Using LLMs
+
+ The package also supports generating text with Google's models.
+
+ ```python
+ from langchain_google_genai import GoogleGenerativeAI
+
+ llm = GoogleGenerativeAI(model="gemini-pro")
+ llm.invoke("Once upon a time, a library called LangChain")
+ ```
+
  ## Embedding Generation

  The package also supports creating embeddings with Google's models, useful for textual similarity and other NLP applications.
@@ -42,5 +56,10 @@ embeddings.embed_query("hello, world!")
  """  # noqa: E501
  from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
  from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
+ from langchain_google_genai.llms import GoogleGenerativeAI

- __all__ = ["ChatGoogleGenerativeAI", "GoogleGenerativeAIEmbeddings"]
+ __all__ = [
+     "ChatGoogleGenerativeAI",
+     "GoogleGenerativeAIEmbeddings",
+     "GoogleGenerativeAI",
+ ]
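With the widened `__all__`, all three entry points are importable from the package root; a quick, hypothetical smoke test:

```python
# Import check for the expanded public API in 0.0.5.
from langchain_google_genai import (
    ChatGoogleGenerativeAI,
    GoogleGenerativeAI,
    GoogleGenerativeAIEmbeddings,
)

print(ChatGoogleGenerativeAI, GoogleGenerativeAI, GoogleGenerativeAIEmbeddings)
```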
@@ -37,6 +37,7 @@ from langchain_core.messages import (
      ChatMessageChunk,
      HumanMessage,
      HumanMessageChunk,
+     SystemMessage,
  )
  from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
  from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
@@ -106,7 +107,7 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
      )


- def _chat_with_retry(*, generation_method: Callable, **kwargs: Any) -> Any:
+ def _chat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
      """
      Executes a chat generation method with retry logic using tenacity.

@@ -139,7 +140,7 @@ def _chat_with_retry(*, generation_method: Callable, **kwargs: Any) -> Any:
      return _chat_with_retry(**kwargs)


- async def _achat_with_retry(*, generation_method: Callable, **kwargs: Any) -> Any:
+ async def _achat_with_retry(generation_method: Callable, **kwargs: Any) -> Any:
      """
      Executes a chat generation method with retry logic using tenacity.

@@ -172,26 +173,6 @@ async def _achat_with_retry(*, generation_method: Callable, **kwargs: Any) -> An
      return await _achat_with_retry(**kwargs)


- def _get_role(message: BaseMessage) -> str:
-     if isinstance(message, ChatMessage):
-         if message.role not in ("user", "model"):
-             raise ChatGoogleGenerativeAIError(
-                 "Gemini only supports user and model roles when"
-                 " providing it with Chat messages."
-             )
-         return message.role
-     elif isinstance(message, HumanMessage):
-         return "user"
-     elif isinstance(message, AIMessage):
-         return "model"
-     else:
-         # TODO: Gemini doesn't seem to have a concept of system messages yet.
-         raise ChatGoogleGenerativeAIError(
-             f"Message of '{message.type}' type not supported by Gemini."
-             " Please only provide it with Human or AI (user/assistant) messages."
-         )
-
-
  def _is_openai_parts_format(part: dict) -> bool:
      return "type" in part

@@ -266,13 +247,14 @@ def _url_to_pil(image_source: str) -> Image:


  def _convert_to_parts(
-     content: Sequence[Union[str, dict]],
+     raw_content: Union[str, Sequence[Union[str, dict]]],
  ) -> List[genai.types.PartType]:
      """Converts a list of LangChain messages into a google parts."""
      parts = []
+     content = [raw_content] if isinstance(raw_content, str) else raw_content
      for part in content:
          if isinstance(part, str):
-             parts.append(genai.types.PartDict(text=part, inline_data=None))
+             parts.append(genai.types.PartDict(text=part))
          elif isinstance(part, Mapping):
              # OpenAI Format
              if _is_openai_parts_format(part):
@@ -304,27 +286,49 @@ def _convert_to_parts(
      return parts


- def _messages_to_genai_contents(
-     input_messages: Sequence[BaseMessage],
+ def _parse_chat_history(
+     input_messages: Sequence[BaseMessage], convert_system_message_to_human: bool = False
  ) -> List[genai.types.ContentDict]:
-     """Converts a list of messages into a Gemini API google content dicts."""
-
      messages: List[genai.types.MessageDict] = []
+
+     raw_system_message: Optional[SystemMessage] = None
      for i, message in enumerate(input_messages):
-         role = _get_role(message)
-         if isinstance(message.content, str):
-             parts = [message.content]
+         if (
+             i == 0
+             and isinstance(message, SystemMessage)
+             and not convert_system_message_to_human
+         ):
+             raise ValueError(
+                 """SystemMessages are not yet supported!
+
+ To automatically convert the leading SystemMessage to a HumanMessage,
+ set `convert_system_message_to_human` to True. Example:
+
+ llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
+ """
+             )
+         elif i == 0 and isinstance(message, SystemMessage):
+             raw_system_message = message
+             continue
+         elif isinstance(message, AIMessage):
+             role = "model"
+         elif isinstance(message, HumanMessage):
+             role = "user"
          else:
-             parts = _convert_to_parts(message.content)
-         messages.append({"role": role, "parts": parts})
-         if i > 0:
-             # Cannot have multiple messages from the same role in a row.
-             if role == messages[-2]["role"]:
-                 raise ChatGoogleGenerativeAIError(
-                     "Cannot have multiple messages from the same role in a row."
-                     " Consider merging them into a single message with multiple"
-                     f" parts.\nReceived: {messages}"
+             raise ValueError(
+                 f"Unexpected message with type {type(message)} at the position {i}."
+             )
+
+         parts = _convert_to_parts(message.content)
+         if raw_system_message:
+             if role == "model":
+                 raise ValueError(
+                     "SystemMessage should be followed by a HumanMessage and "
+                     "not by AIMessage."
                  )
+             parts = _convert_to_parts(raw_system_message.content) + parts
+             raw_system_message = None
+         messages.append({"role": role, "parts": parts})
      return messages


@@ -457,8 +461,11 @@ Supported examples:
      n: int = Field(default=1, alias="candidate_count")
      """Number of chat completions to generate for each prompt. Note that the API may
      not return the full n completions if duplicates are generated."""
-
-     _generative_model: Any  #: :meta private:
+     convert_system_message_to_human: bool = False
+     """Whether to merge any leading SystemMessage into the following HumanMessage.
+
+     Gemini does not support system messages; any unsupported messages will
+     raise an error."""

      class Config:
          allow_population_by_field_name = True
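Together with the `_parse_chat_history` rewrite above, the new flag gives a supported path for system prompts: the leading `SystemMessage` is folded into the first human turn before the request is sent, and without the flag a `ValueError` is raised. A usage sketch (not part of the diff; the prompt text is illustrative) mirroring the example embedded in the error message:

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI

# convert_system_message_to_human=True merges the SystemMessage into the
# following HumanMessage; omitting it with a SystemMessage raises ValueError.
llm = ChatGoogleGenerativeAI(model="gemini-pro", convert_system_message_to_human=True)
llm.invoke(
    [
        SystemMessage(content="Answer in one short sentence."),
        HumanMessage(content="What is LangChain?"),
    ]
)
```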
@@ -499,7 +506,7 @@ Supported examples:
          if values.get("top_k") is not None and values["top_k"] <= 0:
              raise ValueError("top_k must be positive")
          model = values["model"]
-         values["_generative_model"] = genai.GenerativeModel(model_name=model)
+         values["client"] = genai.GenerativeModel(model_name=model)
          return values

      @property
@@ -512,18 +519,9 @@ Supported examples:
              "n": self.n,
          }

-     @property
-     def _generation_method(self) -> Callable:
-         return self._generative_model.generate_content
-
-     @property
-     def _async_generation_method(self) -> Callable:
-         return self._generative_model.generate_content_async
-
      def _prepare_params(
-         self, messages: Sequence[BaseMessage], stop: Optional[List[str]], **kwargs: Any
+         self, stop: Optional[List[str]], **kwargs: Any
      ) -> Dict[str, Any]:
-         contents = _messages_to_genai_contents(messages)
          gen_config = {
              k: v
              for k, v in {
@@ -538,7 +536,7 @@ Supported examples:
          }
          if "generation_config" in kwargs:
              gen_config = {**gen_config, **kwargs.pop("generation_config")}
-         params = {"generation_config": gen_config, "contents": contents, **kwargs}
+         params = {"generation_config": gen_config, **kwargs}
          return params

      def _generate(
  def _generate(
@@ -548,10 +546,11 @@ Supported examples:
548
546
  run_manager: Optional[CallbackManagerForLLMRun] = None,
549
547
  **kwargs: Any,
550
548
  ) -> ChatResult:
551
- params = self._prepare_params(messages, stop, **kwargs)
549
+ params, chat, message = self._prepare_chat(messages, stop=stop)
552
550
  response: genai.types.GenerateContentResponse = _chat_with_retry(
551
+ content=message,
553
552
  **params,
554
- generation_method=self._generation_method,
553
+ generation_method=chat.send_message,
555
554
  )
556
555
  return _response_to_result(response)
557
556
 
@@ -562,10 +561,11 @@ Supported examples:
          run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
          **kwargs: Any,
      ) -> ChatResult:
-         params = self._prepare_params(messages, stop, **kwargs)
+         params, chat, message = self._prepare_chat(messages, stop=stop)
          response: genai.types.GenerateContentResponse = await _achat_with_retry(
+             content=message,
              **params,
-             generation_method=self._async_generation_method,
+             generation_method=chat.send_message_async,
          )
          return _response_to_result(response)

@@ -576,10 +576,11 @@ Supported examples:
          run_manager: Optional[CallbackManagerForLLMRun] = None,
          **kwargs: Any,
      ) -> Iterator[ChatGenerationChunk]:
-         params = self._prepare_params(messages, stop, **kwargs)
+         params, chat, message = self._prepare_chat(messages, stop=stop)
          response: genai.types.GenerateContentResponse = _chat_with_retry(
+             content=message,
              **params,
-             generation_method=self._generation_method,
+             generation_method=chat.send_message,
              stream=True,
          )
          for chunk in response:
@@ -602,10 +603,11 @@ Supported examples:
          run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
          **kwargs: Any,
      ) -> AsyncIterator[ChatGenerationChunk]:
-         params = self._prepare_params(messages, stop, **kwargs)
+         params, chat, message = self._prepare_chat(messages, stop=stop)
          async for chunk in await _achat_with_retry(
+             content=message,
              **params,
-             generation_method=self._async_generation_method,
+             generation_method=chat.send_message_async,
              stream=True,
          ):
              _chat_result = _response_to_result(
@@ -619,3 +621,18 @@ Supported examples:
              yield gen
              if run_manager:
                  await run_manager.on_llm_new_token(gen.text)
+
+     def _prepare_chat(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         **kwargs: Any,
+     ) -> Tuple[Dict[str, Any], genai.ChatSession, genai.types.ContentDict]:
+         params = self._prepare_params(stop, **kwargs)
+         history = _parse_chat_history(
+             messages,
+             convert_system_message_to_human=self.convert_system_message_to_human,
+         )
+         message = history.pop()
+         chat = self.client.start_chat(history=history)
+         return params, chat, message
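With `_prepare_chat` in place, every generate and stream call now routes through a `genai` chat session instead of a bare `generate_content` call: earlier turns seed the session and the final turn is sent explicitly. A condensed, self-contained sketch of that call path (assumes `GOOGLE_API_KEY` is configured; the history below is illustrative, not from the diff):

```python
# Simplified view of the new call path; mirrors _prepare_chat plus send_message.
import google.generativeai as genai

client = genai.GenerativeModel(model_name="gemini-pro")
history = [
    {"role": "user", "parts": [{"text": "Hello"}]},
    {"role": "model", "parts": [{"text": "Hi! How can I help?"}]},
    {"role": "user", "parts": [{"text": "Tell me a fun fact."}]},
]
message = history.pop()                     # the turn to send now
chat = client.start_chat(history=history)   # prior turns become session history
response = chat.send_message(message, generation_config={"temperature": 0.7})
print(response.text)
```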
@@ -0,0 +1,262 @@
+ from __future__ import annotations
+
+ from typing import Any, Callable, Dict, Iterator, List, Optional, Union
+
+ import google.api_core
+ import google.generativeai as genai  # type: ignore[import]
+ from langchain_core.callbacks import (
+     AsyncCallbackManagerForLLMRun,
+     CallbackManagerForLLMRun,
+ )
+ from langchain_core.language_models import LanguageModelInput
+ from langchain_core.language_models.llms import BaseLLM, create_base_retry_decorator
+ from langchain_core.outputs import Generation, GenerationChunk, LLMResult
+ from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+ from langchain_core.utils import get_from_dict_or_env
+
+
+ def _create_retry_decorator(
+     llm: BaseLLM,
+     *,
+     max_retries: int = 1,
+     run_manager: Optional[
+         Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
+     ] = None,
+ ) -> Callable[[Any], Any]:
+     """Creates a retry decorator for Vertex / Palm LLMs."""
+
+     errors = [
+         google.api_core.exceptions.ResourceExhausted,
+         google.api_core.exceptions.ServiceUnavailable,
+         google.api_core.exceptions.Aborted,
+         google.api_core.exceptions.DeadlineExceeded,
+         google.api_core.exceptions.GoogleAPIError,
+     ]
+     decorator = create_base_retry_decorator(
+         error_types=errors, max_retries=max_retries, run_manager=run_manager
+     )
+     return decorator
+
+
+ def _completion_with_retry(
+     llm: GoogleGenerativeAI,
+     prompt: LanguageModelInput,
+     is_gemini: bool = False,
+     stream: bool = False,
+     run_manager: Optional[CallbackManagerForLLMRun] = None,
+     **kwargs: Any,
+ ) -> Any:
+     """Use tenacity to retry the completion call."""
+     retry_decorator = _create_retry_decorator(
+         llm, max_retries=llm.max_retries, run_manager=run_manager
+     )
+
+     @retry_decorator
+     def _completion_with_retry(
+         prompt: LanguageModelInput, is_gemini: bool, stream: bool, **kwargs: Any
+     ) -> Any:
+         generation_config = kwargs.get("generation_config", {})
+         if is_gemini:
+             return llm.client.generate_content(
+                 contents=prompt, stream=stream, generation_config=generation_config
+             )
+         return llm.client.generate_text(prompt=prompt, **kwargs)
+
+     return _completion_with_retry(
+         prompt=prompt, is_gemini=is_gemini, stream=stream, **kwargs
+     )
+
+
+ def _is_gemini_model(model_name: str) -> bool:
+     return "gemini" in model_name
+
+
+ def _strip_erroneous_leading_spaces(text: str) -> str:
+     """Strip erroneous leading spaces from text.
+
+     The PaLM API will sometimes erroneously return a single leading space in all
+     lines > 1. This function strips that space.
+     """
+     has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:])
+     if has_leading_space:
+         return text.replace("\n ", "\n")
+     else:
+         return text
+
+
+ class GoogleGenerativeAI(BaseLLM, BaseModel):
+     """Google GenerativeAI models.
+
+     Example:
+         .. code-block:: python
+
+             from langchain_google_genai import GoogleGenerativeAI
+             llm = GoogleGenerativeAI(model="gemini-pro")
+     """
+
+     client: Any  #: :meta private:
+     model: str = Field(
+         ...,
+         description="""The name of the model to use.
+ Supported examples:
+     - gemini-pro
+     - models/text-bison-001""",
+     )
+     """Model name to use."""
+     google_api_key: Optional[SecretStr] = None
+     temperature: float = 0.7
+     """Run inference with this temperature. Must by in the closed interval
+     [0.0, 1.0]."""
+     top_p: Optional[float] = None
+     """Decode using nucleus sampling: consider the smallest set of tokens whose
+     probability sum is at least top_p. Must be in the closed interval [0.0, 1.0]."""
+     top_k: Optional[int] = None
+     """Decode using top-k sampling: consider the set of top_k most probable tokens.
+     Must be positive."""
+     max_output_tokens: Optional[int] = None
+     """Maximum number of tokens to include in a candidate. Must be greater than zero.
+     If unset, will default to 64."""
+     n: int = 1
+     """Number of chat completions to generate for each prompt. Note that the API may
+     not return the full n completions if duplicates are generated."""
+     max_retries: int = 6
+     """The maximum number of retries to make when generating."""
+
+     @property
+     def is_gemini(self) -> bool:
+         """Returns whether a model is belongs to a Gemini family or not."""
+         return _is_gemini_model(self.model)
+
+     @property
+     def lc_secrets(self) -> Dict[str, str]:
+         return {"google_api_key": "GOOGLE_API_KEY"}
+
+     @root_validator()
+     def validate_environment(cls, values: Dict) -> Dict:
+         """Validate api key, python package exists."""
+         google_api_key = get_from_dict_or_env(
+             values, "google_api_key", "GOOGLE_API_KEY"
+         )
+         model_name = values["model"]
+
+         if isinstance(google_api_key, SecretStr):
+             google_api_key = google_api_key.get_secret_value()
+
+         genai.configure(api_key=google_api_key)
+
+         if _is_gemini_model(model_name):
+             values["client"] = genai.GenerativeModel(model_name=model_name)
+         else:
+             values["client"] = genai
+
+         if values["temperature"] is not None and not 0 <= values["temperature"] <= 1:
+             raise ValueError("temperature must be in the range [0.0, 1.0]")
+
+         if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
+             raise ValueError("top_p must be in the range [0.0, 1.0]")
+
+         if values["top_k"] is not None and values["top_k"] <= 0:
+             raise ValueError("top_k must be positive")
+
+         if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0:
+             raise ValueError("max_output_tokens must be greater than zero")
+
+         return values
+
+     def _generate(
+         self,
+         prompts: List[str],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> LLMResult:
+         generations: List[List[Generation]] = []
+         generation_config = {
+             "stop_sequences": stop,
+             "temperature": self.temperature,
+             "top_p": self.top_p,
+             "top_k": self.top_k,
+             "max_output_tokens": self.max_output_tokens,
+             "candidate_count": self.n,
+         }
+         for prompt in prompts:
+             if self.is_gemini:
+                 res = _completion_with_retry(
+                     self,
+                     prompt=prompt,
+                     stream=False,
+                     is_gemini=True,
+                     run_manager=run_manager,
+                     generation_config=generation_config,
+                 )
+                 candidates = [
+                     "".join([p.text for p in c.content.parts]) for c in res.candidates
+                 ]
+                 generations.append([Generation(text=c) for c in candidates])
+             else:
+                 res = _completion_with_retry(
+                     self,
+                     model=self.model,
+                     prompt=prompt,
+                     stream=False,
+                     is_gemini=False,
+                     run_manager=run_manager,
+                     **generation_config,
+                 )
+                 prompt_generations = []
+                 for candidate in res.candidates:
+                     raw_text = candidate["output"]
+                     stripped_text = _strip_erroneous_leading_spaces(raw_text)
+                     prompt_generations.append(Generation(text=stripped_text))
+                 generations.append(prompt_generations)
+
+         return LLMResult(generations=generations)
+
+     def _stream(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> Iterator[GenerationChunk]:
+         generation_config = kwargs.get("generation_config", {})
+         if stop:
+             generation_config["stop_sequences"] = stop
+         for stream_resp in _completion_with_retry(
+             self,
+             prompt,
+             stream=True,
+             is_gemini=True,
+             run_manager=run_manager,
+             generation_config=generation_config,
+             **kwargs,
+         ):
+             chunk = GenerationChunk(text=stream_resp.text)
+             yield chunk
+             if run_manager:
+                 run_manager.on_llm_new_token(
+                     stream_resp.text,
+                     chunk=chunk,
+                     verbose=self.verbose,
+                 )
+
+     @property
+     def _llm_type(self) -> str:
+         """Return type of llm."""
+         return "google_palm"
+
+     def get_num_tokens(self, text: str) -> int:
+         """Get the number of tokens present in the text.
+
+         Useful for checking if an input will fit in a model's context window.
+
+         Args:
+             text: The string input to tokenize.
+
+         Returns:
+             The integer number of tokens in the text.
+         """
+         if self.is_gemini:
+             raise ValueError("Counting tokens is not yet supported!")
+         result = self.client.count_text_tokens(model=self.model, prompt=text)
+         return result["token_count"]
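A minimal usage sketch for the new `GoogleGenerativeAI` LLM (not part of the diff); it goes through the standard LangChain `invoke`/`stream` interface and assumes `GOOGLE_API_KEY` is set:

```python
from langchain_google_genai import GoogleGenerativeAI

llm = GoogleGenerativeAI(model="gemini-pro", temperature=0.7, max_output_tokens=256)

# Single completion via the standard LangChain LLM interface.
print(llm.invoke("Write a two-line poem about diffs."))

# Token-by-token streaming goes through the _stream implementation above.
for chunk in llm.stream("Explain retries in one sentence."):
    print(chunk, end="", flush=True)
```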
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "langchain-google-genai"
- version = "0.0.3"
+ version = "0.0.5"
  description = "An integration package connecting Google's genai package and LangChain"
  authors = []
  readme = "README.md"
@@ -10,6 +10,10 @@ repository = "https://github.com/langchain-ai/langchain/blob/master/libs/partner
  python = ">=3.9,<4.0"
  langchain-core = "^0.1"
  google-generativeai = "^0.3.1"
+ pillow = { version = "^10.1.0", optional = true }
+
+ [tool.poetry.extras]
+ images = ["pillow"]

  [tool.poetry.group.test]
  optional = true
@@ -34,6 +38,8 @@ codespell = "^2.2.0"
  optional = true

  [tool.poetry.group.test_integration.dependencies]
+ pillow = "^10.1.0"
+

  [tool.poetry.group.lint]
  optional = true