langchain-google-genai 3.0.0rc1__tar.gz → 3.0.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_google_genai-3.0.2/.gitignore +1 -0
- langchain_google_genai-3.0.2/Makefile +70 -0
- langchain_google_genai-3.0.2/PKG-INFO +50 -0
- langchain_google_genai-3.0.2/README.md +30 -0
- langchain_google_genai-3.0.2/langchain_google_genai/__init__.py +93 -0
- langchain_google_genai-3.0.2/langchain_google_genai/_common.py +221 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_compat.py +11 -7
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_enums.py +2 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_function_utils.py +37 -86
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_genai_extension.py +69 -69
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_image_utils.py +11 -9
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/chat_models.py +714 -713
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/embeddings.py +142 -106
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/genai_aqa.py +29 -17
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/google_vector_store.py +89 -70
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/llms.py +15 -12
- langchain_google_genai-3.0.2/pyproject.toml +153 -0
- langchain_google_genai-3.0.2/scripts/check_imports.py +17 -0
- langchain_google_genai-3.0.2/scripts/lint_imports.sh +17 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_chat_models.py +116 -58
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_standard.py +32 -52
- langchain_google_genai-3.0.2/tests/integration_tests/test_structured_output_integration.py +316 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_chat_models.py +597 -34
- langchain_google_genai-3.0.2/tests/unit_tests/test_common.py +68 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_embeddings.py +57 -1
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_function_utils.py +18 -75
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_imports.py +1 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_llms.py +46 -1
- langchain_google_genai-3.0.2/uv.lock +2080 -0
- langchain_google_genai-3.0.0rc1/PKG-INFO +0 -265
- langchain_google_genai-3.0.0rc1/README.md +0 -250
- langchain_google_genai-3.0.0rc1/langchain_google_genai/__init__.py +0 -87
- langchain_google_genai-3.0.0rc1/langchain_google_genai/_common.py +0 -175
- langchain_google_genai-3.0.0rc1/pyproject.toml +0 -138
- langchain_google_genai-3.0.0rc1/tests/unit_tests/test_common.py +0 -31
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/LICENSE +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/py.typed +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/conftest.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/.env.example +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/terraform/main.tf +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_callbacks.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_compile.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_embeddings.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_function_call.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_llms.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/integration_tests/test_tools.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/__init__.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/__snapshots__/test_standard.ambr +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_chat_models_protobuf_fix.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_genai_aqa.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_google_vector_store.py +0 -0
- {langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/tests/unit_tests/test_standard.py +0 -0
langchain_google_genai-3.0.2/.gitignore ADDED
@@ -0,0 +1 @@
+__pycache__
langchain_google_genai-3.0.2/Makefile ADDED
@@ -0,0 +1,70 @@
+.PHONY: all format lint test tests integration_tests help
+
+# Default target executed when no arguments are given to make.
+all: help
+
+.EXPORT_ALL_VARIABLES:
+UV_FROZEN = true
+
+# Define a variable for the test file path.
+TEST_FILE ?= tests/unit_tests/
+
+integration_test integration_tests: TEST_FILE = tests/integration_tests/
+
+test tests:
+	uv run --group test pytest --disable-socket --allow-unix-socket $(TEST_FILE)
+
+integration_test integration_tests:
+	uv run --group test --group test_integration pytest --retries 3 --retry-delay 1 $(TEST_FILE)
+
+check_imports: $(shell find langchain_google_genai -name '*.py')
+	uv run --all-groups python ./scripts/check_imports.py $^
+
+test_watch:
+	uv run ptw --snapshot-update --now . -- -vv $(TEST_FILE)
+
+# Run unit tests and generate a coverage report.
+coverage:
+	uv run pytest --cov \
+		--cov-config=.coveragerc \
+		--cov-report xml \
+		--cov-report term-missing:skip-covered \
+		$(TEST_FILE)
+
+######################
+# LINTING AND FORMATTING
+######################
+
+# Define a variable for Python and notebook files.
+PYTHON_FILES=.
+MYPY_CACHE=.mypy_cache
+lint format: PYTHON_FILES=.
+lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d main | grep -E '\.py$$|\.ipynb$$')
+lint_package: PYTHON_FILES=langchain_google_genai
+lint_tests: PYTHON_FILES=tests
+lint_tests: MYPY_CACHE=.mypy_cache_test
+
+lint lint_diff lint_package lint_tests:
+	./scripts/lint_imports.sh
+	[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff check $(PYTHON_FILES)
+	[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff format $(PYTHON_FILES) --diff
+	[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff check --select I $(PYTHON_FILES)
+	[ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && uv run --all-groups mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE)
+
+format format_diff:
+	[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff format $(PYTHON_FILES)
+	[ "$(PYTHON_FILES)" = "" ] || uv run --all-groups ruff check --fix $(PYTHON_FILES)
+
+######################
+# HELP
+######################
+
+help:
+	@echo '----'
+	@echo 'check_imports - check imports'
+	@echo 'format - run code formatters'
+	@echo 'lint - run linters'
+	@echo 'test - run unit tests'
+	@echo 'tests - run unit tests'
+	@echo 'integration_test - run integration tests(NOTE: "export GOOGLE_API_KEY=..." is needed.)'
+	@echo 'test TEST_FILE=<test_file> - run all tests in file'
langchain_google_genai-3.0.2/PKG-INFO ADDED
@@ -0,0 +1,50 @@
+Metadata-Version: 2.4
+Name: langchain-google-genai
+Version: 3.0.2
+Summary: An integration package connecting Google's genai package and LangChain
+Project-URL: Homepage, https://docs.langchain.com/oss/python/integrations/providers/google
+Project-URL: Documentation, https://reference.langchain.com/python/integrations/langchain_google_genai/
+Project-URL: Source, https://github.com/langchain-ai/langchain-google/tree/master/libs/genai
+Project-URL: Changelog, https://github.com/langchain-ai/langchain-google/releases?q=%22genai%22
+Project-URL: Twitter, https://x.com/LangChainAI
+Project-URL: Slack, https://www.langchain.com/join-community
+Project-URL: Reddit, https://www.reddit.com/r/LangChain/
+License: MIT
+License-File: LICENSE
+Requires-Python: <4.0.0,>=3.10.0
+Requires-Dist: filetype<2.0.0,>=1.2.0
+Requires-Dist: google-ai-generativelanguage<1.0.0,>=0.7.0
+Requires-Dist: langchain-core<2.0.0,>=1.0.0
+Requires-Dist: pydantic<3.0.0,>=2.0.0
+Description-Content-Type: text/markdown
+
+# langchain-google-genai
+
+[](https://pypi.org/project/langchain-google-genai/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-google-genai)
+[](https://twitter.com/langchainai)
+
+Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+This package provides access to Google Gemini's chat, vision, embeddings, and other capabilities within the LangChain ecosystem.
+
+## Quick Install
+
+```bash
+pip install langchain-google-genai
+```
+
+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/integrations/langchain_google_genai/). For conceptual guides, tutorials, and examples on using these classes, see the [LangChain Docs](https://docs.langchain.com/oss/python/integrations/providers/google#google-generative-ai).
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
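
The `Version` and `Requires-Dist` fields above describe the 3.0.2 release this diff targets. A minimal, hedged way to confirm which release is actually installed in a local environment (it assumes the package is already installed; the expected string `3.0.2` comes straight from the PKG-INFO `Version` field):

```python
# Minimal sketch: confirm the locally installed release matches the PKG-INFO above.
# Assumes langchain-google-genai is installed in the current environment.
from importlib import metadata

installed = metadata.version("langchain-google-genai")
print(installed)  # expected: "3.0.2" for this release
```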
langchain_google_genai-3.0.2/README.md ADDED
@@ -0,0 +1,30 @@
+# langchain-google-genai
+
+[](https://pypi.org/project/langchain-google-genai/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-google-genai)
+[](https://twitter.com/langchainai)
+
+Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+This package provides access to Google Gemini's chat, vision, embeddings, and other capabilities within the LangChain ecosystem.
+
+## Quick Install
+
+```bash
+pip install langchain-google-genai
+```
+
+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/integrations/langchain_google_genai/). For conceptual guides, tutorials, and examples on using these classes, see the [LangChain Docs](https://docs.langchain.com/oss/python/integrations/providers/google#google-generative-ai).
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
langchain_google_genai-3.0.2/langchain_google_genai/__init__.py ADDED
@@ -0,0 +1,93 @@
+"""LangChain Google Generative AI Integration (GenAI).
+
+This module integrates Google's Generative AI models, specifically the Gemini series,
+with the LangChain framework. It provides classes for interacting with chat models and
+generating embeddings, leveraging Google's advanced AI capabilities.
+
+**Chat Models**
+
+The `ChatGoogleGenerativeAI` class is the primary interface for interacting with
+Google's Gemini chat models. It allows users to send and receive messages using a
+specified Gemini model, suitable for various conversational AI applications.
+
+**LLMs**
+
+The `GoogleGenerativeAI` class is the primary interface for interacting with Google's
+Gemini LLMs. It allows users to generate text using a specified Gemini model.
+
+**Embeddings**
+
+The `GoogleGenerativeAIEmbeddings` class provides functionalities to generate embeddings
+using Google's models. These embeddings can be used for a range of NLP tasks, including
+semantic analysis, similarity comparisons, and more.
+
+**Using Chat Models**
+
+After setting up your environment with the required API key, you can interact with the
+Google Gemini models.
+
+```python
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+llm = ChatGoogleGenerativeAI(model="gemini-2.5-pro")
+llm.invoke("Sing a ballad of LangChain.")
+```
+
+**Using LLMs**
+
+The package also supports generating text with Google's models.
+
+```python
+from langchain_google_genai import GoogleGenerativeAI
+
+llm = GoogleGenerativeAI(model="gemini-2.5-pro")
+llm.invoke("Once upon a time, a library called LangChain")
+```
+
+**Embedding Generation**
+
+The package also supports creating embeddings with Google's models, useful for textual
+similarity and other NLP applications.
+
+```python
+from langchain_google_genai import GoogleGenerativeAIEmbeddings
+
+embeddings = GoogleGenerativeAIEmbeddings(model="models/gemini-embedding-001")
+embeddings.embed_query("hello, world!")
+```
+"""
+
+from langchain_google_genai._enums import (
+    HarmBlockThreshold,
+    HarmCategory,
+    MediaResolution,
+    Modality,
+)
+from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
+from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
+from langchain_google_genai.genai_aqa import (
+    AqaInput,
+    AqaOutput,
+    GenAIAqa,
+)
+from langchain_google_genai.google_vector_store import (
+    DoesNotExistsException,
+    GoogleVectorStore,
+)
+from langchain_google_genai.llms import GoogleGenerativeAI
+
+__all__ = [
+    "AqaInput",
+    "AqaOutput",
+    "ChatGoogleGenerativeAI",
+    "DoesNotExistsException",
+    "DoesNotExistsException",
+    "GenAIAqa",
+    "GoogleGenerativeAI",
+    "GoogleGenerativeAIEmbeddings",
+    "GoogleVectorStore",
+    "HarmBlockThreshold",
+    "HarmCategory",
+    "MediaResolution",
+    "Modality",
+]
langchain_google_genai-3.0.2/langchain_google_genai/_common.py ADDED
@@ -0,0 +1,221 @@
+import os
+from importlib import metadata
+from typing import Any, TypedDict
+
+from google.api_core.gapic_v1.client_info import ClientInfo
+from langchain_core.utils import secret_from_env
+from pydantic import BaseModel, Field, SecretStr
+
+from langchain_google_genai._enums import (
+    HarmBlockThreshold,
+    HarmCategory,
+    MediaResolution,
+    Modality,
+)
+
+_TELEMETRY_TAG = "remote_reasoning_engine"
+_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID"
+
+# Cache package version at module import time to avoid blocking I/O in async contexts
+try:
+    LC_GOOGLE_GENAI_VERSION = metadata.version("langchain-google-genai")
+except metadata.PackageNotFoundError:
+    LC_GOOGLE_GENAI_VERSION = "0.0.0"
+
+
+class GoogleGenerativeAIError(Exception):
+    """Custom exception class for errors associated with the `Google GenAI` API."""
+
+
+class _BaseGoogleGenerativeAI(BaseModel):
+    """Base class for Google Generative AI LLMs."""
+
+    model: str = Field(...)
+    """Model name to use."""
+
+    google_api_key: SecretStr | None = Field(
+        alias="api_key", default_factory=secret_from_env("GOOGLE_API_KEY", default=None)
+    )
+    """Google AI API key.
+
+    If not specified will be read from env var `GOOGLE_API_KEY`.
+    """
+
+    credentials: Any = None
+    """The default custom credentials to use when making API calls.
+
+    If not provided, credentials will be ascertained from the `GOOGLE_API_KEY` env var.
+    """
+
+    temperature: float = 0.7
+    """Run inference with this temperature.
+
+    Must be within `[0.0, 2.0]`.
+    """
+
+    top_p: float | None = None
+    """Decode using nucleus sampling.
+
+    Consider the smallest set of tokens whose probability sum is at least `top_p`.
+
+    Must be within `[0.0, 1.0]`.
+    """
+
+    top_k: int | None = None
+    """Decode using top-k sampling: consider the set of `top_k` most probable tokens.
+
+    Must be positive.
+    """
+
+    max_output_tokens: int | None = Field(default=None, alias="max_tokens")
+    """Maximum number of tokens to include in a candidate.
+
+    Must be greater than zero.
+
+    If unset, will use the model's default value, which varies by model.
+
+    See [docs](https://ai.google.dev/gemini-api/docs/models) for model-specific limits.
+    """
+
+    n: int = 1
+    """Number of chat completions to generate for each prompt.
+
+    Note that the API may not return the full `n` completions if duplicates are
+    generated.
+    """
+
+    max_retries: int = Field(default=6, alias="retries")
+    """The maximum number of retries to make when generating."""
+
+    timeout: float | None = Field(default=None, alias="request_timeout")
+    """The maximum number of seconds to wait for a response."""
+
+    client_options: dict | None = Field(
+        default=None,
+    )
+    """A dictionary of client options to pass to the Google API client.
+
+    Example: `api_endpoint`
+
+    !!! warning
+
+        If both `client_options['api_endpoint']` and `base_url` are specified,
+        the `api_endpoint` in `client_options` takes precedence.
+    """
+
+    base_url: str | None = Field(
+        default=None,
+    )
+    """Base URL to use for the API client.
+
+    This is a convenience alias for `client_options['api_endpoint']`.
+
+    !!! warning
+
+        If `client_options` already contains an `api_endpoint`, this parameter will be
+        ignored in favor of the existing value.
+    """
+
+    transport: str | None = Field(
+        default=None,
+        alias="api_transport",
+    )
+    """A string, one of: `['rest', 'grpc', 'grpc_asyncio']`.
+
+    The Google client library defaults to `'grpc'` for sync clients.
+
+    For async clients, `'rest'` is converted to `'grpc_asyncio'` unless
+    a custom endpoint is specified.
+    """
+
+    additional_headers: dict[str, str] | None = Field(
+        default=None,
+    )
+    """Key-value dictionary representing additional headers for the model call"""
+
+    response_modalities: list[Modality] | None = Field(
+        default=None,
+    )
+    """A list of modalities of the response"""
+
+    thinking_budget: int | None = Field(
+        default=None,
+    )
+    """Indicates the thinking budget in tokens."""
+
+    media_resolution: MediaResolution | None = Field(
+        default=None,
+    )
+    """Media resolution for the input media."""
+
+    include_thoughts: bool | None = Field(
+        default=None,
+    )
+    """Indicates whether to include thoughts in the response."""
+
+    safety_settings: dict[HarmCategory, HarmBlockThreshold] | None = None
+    """Default safety settings to use for all generations.
+
+    !!! example
+
+        ```python
+        from google.generativeai.types.safety_types import HarmBlockThreshold, HarmCategory
+
+        safety_settings = {
+            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+            HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
+        }
+        ```
+    """  # noqa: E501
+
+    @property
+    def lc_secrets(self) -> dict[str, str]:
+        return {"google_api_key": "GOOGLE_API_KEY"}
+
+    @property
+    def _identifying_params(self) -> dict[str, Any]:
+        """Get the identifying parameters."""
+        return {
+            "model": self.model,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+            "max_output_tokens": self.max_output_tokens,
+            "candidate_count": self.n,
+        }
+
+
+def get_user_agent(module: str | None = None) -> tuple[str, str]:
+    r"""Returns a custom user agent header.
+
+    Args:
+        module: The module for a custom user agent header.
+    """
+    client_library_version = (
+        f"{LC_GOOGLE_GENAI_VERSION}-{module}" if module else LC_GOOGLE_GENAI_VERSION
+    )
+    if os.environ.get(_TELEMETRY_ENV_VARIABLE_NAME):
+        client_library_version += f"+{_TELEMETRY_TAG}"
+    return client_library_version, f"langchain-google-genai/{client_library_version}"
+
+
+def get_client_info(module: str | None = None) -> "ClientInfo":
+    r"""Returns a client info object with a custom user agent header.
+
+    Args:
+        module: The module for a custom user agent header.
+    """
+    client_library_version, user_agent = get_user_agent(module)
+    # TODO: remove ignore once google-auth has types.
+    return ClientInfo(  # type: ignore[no-untyped-call]
+        client_library_version=client_library_version,
+        user_agent=user_agent,
+    )
+
+
+class SafetySettingDict(TypedDict):
+    category: HarmCategory
+
+    threshold: HarmBlockThreshold
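
`get_user_agent` in the new `_common.py` above drives the client telemetry headers: it suffixes the cached package version with the calling module and, when `GOOGLE_CLOUD_AGENT_ENGINE_ID` is set, appends the `remote_reasoning_engine` tag. A minimal sketch of that behavior, assuming the module path shown above and treating the exact version string as environment-dependent:

```python
# Sketch of the user-agent composition implemented in _common.py above.
# The concrete version string depends on the installed package;
# "ChatGoogleGenerativeAI" is just an illustrative module tag.
import os

from langchain_google_genai._common import get_user_agent

version_tag, user_agent = get_user_agent("ChatGoogleGenerativeAI")
# version_tag -> "<package version>-ChatGoogleGenerativeAI"
# user_agent  -> "langchain-google-genai/<package version>-ChatGoogleGenerativeAI"

os.environ["GOOGLE_CLOUD_AGENT_ENGINE_ID"] = "demo"  # any non-empty value
version_tag, _ = get_user_agent()
# version_tag now ends with "+remote_reasoning_engine" per the telemetry check above.
```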
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_compat.py RENAMED
@@ -1,18 +1,18 @@
 """Go from v1 content blocks to generativelanguage_v1beta format."""
 
 import json
-from typing import Any,
+from typing import Any, cast
 
 from langchain_core.messages import content as types
 
 
 def translate_citations_to_grounding_metadata(
-    citations: list[types.Citation], web_search_queries:
+    citations: list[types.Citation], web_search_queries: list[str] | None = None
 ) -> dict[str, Any]:
     """Translate LangChain Citations to Google AI grounding metadata format.
 
     Args:
-        citations: List of Citation content blocks.
+        citations: List of `Citation` content blocks.
         web_search_queries: Optional list of search queries that generated
             the grounding data.
 
@@ -40,7 +40,7 @@ def translate_citations_to_grounding_metadata(
 
     # Group citations by text segment (start_index, end_index, cited_text)
     segment_to_citations: dict[
-        tuple[
+        tuple[int | None, int | None, str | None], list[types.Citation]
     ] = {}
 
     for citation in citations:
@@ -131,7 +131,7 @@ def _convert_from_v1_to_generativelanguage_v1beta(
 
     Returns:
         List of dictionaries in `google.ai.generativelanguage_v1beta.types.Content`
-
+        format, ready to be sent to the API.
     """
     new_content: list = []
     for block in content:
@@ -143,6 +143,10 @@ def _convert_from_v1_to_generativelanguage_v1beta(
         # TextContentBlock
         if block_dict["type"] == "text":
             new_block = {"text": block_dict.get("text", "")}
+            if (
+                thought_signature := (block_dict.get("extras") or {}).get("signature")  # type: ignore[attr-defined]
+            ) and model_provider == "google_genai":
+                new_block["thought_signature"] = thought_signature
             new_content.append(new_block)
         # Citations are only handled on output. Can't pass them back :/
 
@@ -243,7 +247,7 @@ def _convert_from_v1_to_generativelanguage_v1beta(
         elif block_dict["type"] == "server_tool_call":
             if block_dict.get("name") == "code_interpreter":
                 # LangChain v0 format
-                args = cast(dict, block_dict.get("args", {}))
+                args = cast("dict", block_dict.get("args", {}))
                 executable_code = {
                     "type": "executable_code",
                     "executable_code": args.get("code", ""),
@@ -261,7 +265,7 @@ def _convert_from_v1_to_generativelanguage_v1beta(
             )
 
         elif block_dict["type"] == "server_tool_result":
-            extras = cast(dict, block_dict.get("extras", {}))
+            extras = cast("dict", block_dict.get("extras", {}))
             if extras.get("block_type") == "code_execution_result":
                 # LangChain v0 format
                 code_execution_result = {
{langchain_google_genai-3.0.0rc1 → langchain_google_genai-3.0.2}/langchain_google_genai/_enums.py RENAMED
@@ -3,5 +3,6 @@ import google.ai.generativelanguage_v1beta as genai
 HarmBlockThreshold = genai.SafetySetting.HarmBlockThreshold
 HarmCategory = genai.HarmCategory
 Modality = genai.GenerationConfig.Modality
+MediaResolution = genai.GenerationConfig.MediaResolution
 
-__all__ = ["HarmBlockThreshold", "HarmCategory", "Modality"]
+__all__ = ["HarmBlockThreshold", "HarmCategory", "MediaResolution", "Modality"]