langchain-google-genai 0.0.8__tar.gz → 0.0.10rc0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain-google-genai might be problematic.
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/PKG-INFO +4 -4
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/__init__.py +4 -0
- langchain_google_genai-0.0.10rc0/langchain_google_genai/_enums.py +6 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/chat_models.py +15 -7
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/llms.py +37 -2
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/pyproject.toml +3 -6
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/LICENSE +0 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/README.md +0 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/_common.py +0 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/_function_utils.py +0 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/embeddings.py +0 -0
- {langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/py.typed +0 -0
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/PKG-INFO RENAMED

@@ -1,8 +1,8 @@
 Metadata-Version: 2.1
 Name: langchain-google-genai
-Version: 0.0.8
+Version: 0.0.10rc0
 Summary: An integration package connecting Google's genai package and LangChain
-Home-page: https://github.com/langchain-ai/langchain
+Home-page: https://github.com/langchain-ai/langchain-google
 License: MIT
 Requires-Python: >=3.9,<4.0
 Classifier: License :: OSI Approved :: MIT License
@@ -15,8 +15,8 @@ Provides-Extra: images
 Requires-Dist: google-generativeai (>=0.3.1,<0.4.0)
 Requires-Dist: langchain-core (>=0.1,<0.2)
 Requires-Dist: pillow (>=10.1.0,<11.0.0) ; extra == "images"
-Project-URL: Repository, https://github.com/langchain-ai/langchain
-Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/
+Project-URL: Repository, https://github.com/langchain-ai/langchain-google
+Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
 Description-Content-Type: text/markdown

 # langchain-google-genai
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/__init__.py RENAMED

@@ -54,6 +54,8 @@ embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
 embeddings.embed_query("hello, world!")
 ```
 """  # noqa: E501
+
+from langchain_google_genai._enums import HarmBlockThreshold, HarmCategory
 from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
 from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
 from langchain_google_genai.llms import GoogleGenerativeAI
@@ -62,4 +64,6 @@ __all__ = [
     "ChatGoogleGenerativeAI",
     "GoogleGenerativeAIEmbeddings",
     "GoogleGenerativeAI",
+    "HarmBlockThreshold",
+    "HarmCategory",
 ]
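The two enums re-exported here come from the package's new _enums module, so downstream code can configure safety settings without importing the Google SDK types directly. A minimal usage sketch under stated assumptions: the model name is illustrative (not taken from this diff) and GOOGLE_API_KEY is assumed to be set in the environment; only the exports and enum members that appear in this diff are relied on.

    from langchain_google_genai import (
        ChatGoogleGenerativeAI,
        HarmBlockThreshold,
        HarmCategory,
    )

    # Default safety settings applied to every generation made by this model.
    safety_settings = {
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
    }

    # "gemini-pro" is an illustrative model name, not taken from this diff.
    llm = ChatGoogleGenerativeAI(model="gemini-pro", safety_settings=safety_settings)
    print(llm.invoke("Summarize the Gemini safety categories."))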
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/chat_models.py RENAMED

@@ -517,6 +517,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
             "temperature": self.temperature,
             "top_k": self.top_k,
             "n": self.n,
+            "safety_settings": self.safety_settings,
         }

     def _prepare_params(
@@ -549,7 +550,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         params, chat, message = self._prepare_chat(
             messages,
             stop=stop,
-
+            **kwargs,
         )
         response: genai.types.GenerateContentResponse = _chat_with_retry(
             content=message,
@@ -568,7 +569,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         params, chat, message = self._prepare_chat(
             messages,
             stop=stop,
-
+            **kwargs,
         )
         response: genai.types.GenerateContentResponse = await _achat_with_retry(
             content=message,
@@ -587,7 +588,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         params, chat, message = self._prepare_chat(
             messages,
             stop=stop,
-
+            **kwargs,
         )
         response: genai.types.GenerateContentResponse = _chat_with_retry(
             content=message,
@@ -598,6 +599,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         for chunk in response:
             _chat_result = _response_to_result(chunk, stream=True)
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+
             if run_manager:
                 run_manager.on_llm_new_token(gen.text)
             yield gen
@@ -612,7 +614,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         params, chat, message = self._prepare_chat(
             messages,
             stop=stop,
-
+            **kwargs,
         )
         async for chunk in await _achat_with_retry(
             content=message,
@@ -622,6 +624,7 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
         ):
             _chat_result = _response_to_result(chunk, stream=True)
             gen = cast(ChatGenerationChunk, _chat_result.generations[0])
+
             if run_manager:
                 await run_manager.on_llm_new_token(gen.text)
             yield gen
@@ -634,9 +637,14 @@ class ChatGoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseChatModel):
     ) -> Tuple[Dict[str, Any], genai.ChatSession, genai.types.ContentDict]:
         client = self.client
         functions = kwargs.pop("functions", None)
-
-
-
+        safety_settings = kwargs.pop("safety_settings", self.safety_settings)
+        if functions or safety_settings:
+            tools = (
+                convert_to_genai_function_declarations(functions) if functions else None
+            )
+            client = genai.GenerativeModel(
+                model_name=self.model, tools=tools, safety_settings=safety_settings
+            )

         params = self._prepare_params(stop, **kwargs)
         history = _parse_chat_history(
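Since _prepare_chat now pops safety_settings from kwargs and falls back to the instance default, a per-call override should flow through to the underlying genai.GenerativeModel. A hedged sketch of that path, assuming an illustrative model name and GOOGLE_API_KEY set in the environment:

    from langchain_google_genai import (
        ChatGoogleGenerativeAI,
        HarmBlockThreshold,
        HarmCategory,
    )

    llm = ChatGoogleGenerativeAI(model="gemini-pro")  # illustrative model name

    # Per-call override: the diff shows this kwarg being popped inside
    # _prepare_chat and handed to genai.GenerativeModel(safety_settings=...).
    reply = llm.invoke(
        "Write a short, friendly greeting.",
        safety_settings={
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
        },
    )
    print(reply.content)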
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/llms.py RENAMED

@@ -15,6 +15,11 @@ from langchain_core.outputs import Generation, GenerationChunk, LLMResult
 from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
 from langchain_core.utils import get_from_dict_or_env

+from langchain_google_genai._enums import (
+    HarmBlockThreshold,
+    HarmCategory,
+)
+

 class GoogleModelFamily(str, Enum):
     GEMINI = auto()
@@ -77,7 +82,10 @@ def _completion_with_retry(
     try:
         if is_gemini:
             return llm.client.generate_content(
-                contents=prompt,
+                contents=prompt,
+                stream=stream,
+                generation_config=generation_config,
+                safety_settings=kwargs.pop("safety_settings", None),
             )
         return llm.client.generate_text(prompt=prompt, **kwargs)
     except google.api_core.exceptions.FailedPrecondition as exc:
@@ -143,6 +151,22 @@ Supported examples:
         description="A string, one of: [`rest`, `grpc`, `grpc_asyncio`].",
     )

+    safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None
+    """The default safety settings to use for all generations.
+
+    For example:
+
+        from google.generativeai.types.safety_types import HarmBlockThreshold, HarmCategory
+
+        safety_settings = {
+            HarmCategory.HARM_CATEGORY_UNSPECIFIED: HarmBlockThreshold.BLOCK_NONE,
+            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+        }
+    """  # noqa: E501
+
     @property
     def lc_secrets(self) -> Dict[str, str]:
         return {"google_api_key": "GOOGLE_API_KEY"}
@@ -184,6 +208,8 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
         )
         model_name = values["model"]

+        safety_settings = values["safety_settings"]
+
         if isinstance(google_api_key, SecretStr):
             google_api_key = google_api_key.get_secret_value()

@@ -193,8 +219,15 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
             client_options=values.get("client_options"),
         )

+        if safety_settings and (
+            not GoogleModelFamily(model_name) == GoogleModelFamily.GEMINI
+        ):
+            raise ValueError("Safety settings are only supported for Gemini models")
+
         if GoogleModelFamily(model_name) == GoogleModelFamily.GEMINI:
-            values["client"] = genai.GenerativeModel(
+            values["client"] = genai.GenerativeModel(
+                model_name=model_name, safety_settings=safety_settings
+            )
         else:
             values["client"] = genai

@@ -237,6 +270,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
             is_gemini=True,
             run_manager=run_manager,
             generation_config=generation_config,
+            safety_settings=kwargs.pop("safety_settings", None),
         )
         candidates = [
             "".join([p.text for p in c.content.parts]) for c in res.candidates
@@ -278,6 +312,7 @@ class GoogleGenerativeAI(_BaseGoogleGenerativeAI, BaseLLM):
             is_gemini=True,
             run_manager=run_manager,
             generation_config=generation_config,
+            safety_settings=kwargs.pop("safety_settings", None),
             **kwargs,
         ):
             chunk = GenerationChunk(text=stream_resp.text)
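The validator above forwards safety_settings to genai.GenerativeModel only for the Gemini family and raises a ValueError otherwise. A minimal sketch of both paths; the model names are illustrative (the non-Gemini name is a hypothetical PaLM-style identifier, not taken from this diff) and GOOGLE_API_KEY is assumed to be set:

    from langchain_google_genai import (
        GoogleGenerativeAI,
        HarmBlockThreshold,
        HarmCategory,
    )

    safety_settings = {
        HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    }

    # Gemini model: safety_settings are passed to genai.GenerativeModel.
    llm = GoogleGenerativeAI(model="gemini-pro", safety_settings=safety_settings)

    # Non-Gemini model (hypothetical name): the new check raises, because
    # safety settings are only supported for Gemini models.
    try:
        GoogleGenerativeAI(model="models/text-bison-001", safety_settings=safety_settings)
    except ValueError as err:
        print(err)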
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/pyproject.toml RENAMED

@@ -1,14 +1,14 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "0.0.8"
+version = "0.0.10rc0"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
-repository = "https://github.com/langchain-ai/langchain"
+repository = "https://github.com/langchain-ai/langchain-google"
 license = "MIT"

 [tool.poetry.urls]
-"Source Code" = "https://github.com/langchain-ai/langchain/tree/
+"Source Code" = "https://github.com/langchain-ai/langchain-google/tree/main/libs/genai"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
@@ -29,7 +29,6 @@ pytest-mock = "^3.10.0"
 syrupy = "^4.0.2"
 pytest-watcher = "^0.3.4"
 pytest-asyncio = "^0.21.1"
-langchain-core = { path = "../../core", develop = true }
 numpy = "^1.26.2"

 [tool.poetry.group.codespell]
@@ -53,7 +52,6 @@ ruff = "^0.1.5"

 [tool.poetry.group.typing.dependencies]
 mypy = "^0.991"
-langchain-core = { path = "../../core", develop = true }
 types-requests = "^2.28.11.5"
 types-google-cloud-ndb = "^2.2.0.1"
 types-pillow = "^10.1.0.2"
@@ -62,7 +60,6 @@ types-pillow = "^10.1.0.2"
 optional = true

 [tool.poetry.group.dev.dependencies]
-langchain-core = { path = "../../core", develop = true }
 pillow = "^10.1.0"
 types-requests = "^2.31.0.10"
 types-pillow = "^10.1.0.2"
|
File without changes
|
|
File without changes
|
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/_common.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{langchain_google_genai-0.0.8 → langchain_google_genai-0.0.10rc0}/langchain_google_genai/py.typed
RENAMED
|
File without changes
|