langchain-google-genai 2.1.5__tar.gz → 2.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-google-genai might be problematic.

Files changed (18)
  1. langchain_google_genai-2.1.7/PKG-INFO +260 -0
  2. langchain_google_genai-2.1.7/README.md +238 -0
  3. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/_function_utils.py +70 -0
  4. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/chat_models.py +230 -115
  5. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/embeddings.py +4 -1
  6. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/pyproject.toml +5 -5
  7. langchain_google_genai-2.1.5/PKG-INFO +0 -174
  8. langchain_google_genai-2.1.5/README.md +0 -152
  9. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/LICENSE +0 -0
  10. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/__init__.py +0 -0
  11. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/_common.py +0 -0
  12. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/_enums.py +0 -0
  13. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/_genai_extension.py +0 -0
  14. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/_image_utils.py +0 -0
  15. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/genai_aqa.py +0 -0
  16. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/google_vector_store.py +0 -0
  17. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/llms.py +0 -0
  18. {langchain_google_genai-2.1.5 → langchain_google_genai-2.1.7}/langchain_google_genai/py.typed +0 -0
@@ -0,0 +1,260 @@
+ Metadata-Version: 2.1
+ Name: langchain-google-genai
+ Version: 2.1.7
+ Summary: An integration package connecting Google's genai package and LangChain
+ Home-page: https://github.com/langchain-ai/langchain-google
+ License: MIT
+ Requires-Python: >=3.9,<4.0
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: filetype (>=1.2.0,<2.0.0)
+ Requires-Dist: google-ai-generativelanguage (>=0.6.18,<0.7.0)
+ Requires-Dist: langchain-core (>=0.3.68,<0.4.0)
+ Requires-Dist: pydantic (>=2,<3)
+ Project-URL: Repository, https://github.com/langchain-ai/langchain-google
+ Project-URL: Source Code, https://github.com/langchain-ai/langchain-google/tree/main/libs/genai
+ Description-Content-Type: text/markdown
+
+ # langchain-google-genai
+
+ **LangChain integration for Google Gemini models using the `generative-ai` SDK**
+
+ This package enables seamless access to Google Gemini's chat, vision, embeddings, and retrieval-augmented generation (RAG) features within the LangChain ecosystem.
+
+ ---
+
+ ## Table of Contents
+
+ - [Overview](#overview)
+ - [Installation](#installation)
+ - [Quickstart](#quickstart)
+ - [Chat Models](#chat-models)
+ - [Multimodal Inputs](#multimodal-inputs)
+ - [Multimodal Outputs](#multimodal-outputs)
+ - [Multimodal Outputs in Chains](#multimodal-outputs-in-chains)
+ - [Thinking Support](#thinking-support)
+ - [Embeddings](#embeddings)
+ - [Semantic Retrieval (RAG)](#semantic-retrieval-rag)
+
+ ---
+
+ ## Overview
+
+ This package provides LangChain support for Google Gemini models (via the official [Google Generative AI SDK](https://googleapis.github.io/python-genai/)). It supports:
+
+ - Text and vision-based chat models
+ - Embeddings for semantic search
+ - Multimodal inputs and outputs
+ - Retrieval-Augmented Generation (RAG)
+ - Thought tracing with reasoning tokens
+
+ ---
+
+ ## Installation
+
+ ```bash
+ pip install -U langchain-google-genai
+ ```
+
+ ---
+
+ ## Quickstart
+
+ Set up your environment variable with your Gemini API key:
+
+ ```bash
+ export GOOGLE_API_KEY=your-api-key
+ ```
+
+ Then use the `ChatGoogleGenerativeAI` interface:
+
+ ```python
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="gemini-pro")
+ response = llm.invoke("Sing a ballad of LangChain.")
+ print(response.content)
+ ```
+
+ ---
+
+ ## Chat Models
+
+ The main interface for Gemini chat models is `ChatGoogleGenerativeAI`.
+
+ ### Multimodal Inputs
+
+ Gemini vision models support image inputs in single messages.
+
+ ```python
+ from langchain_core.messages import HumanMessage
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="gemini-pro-vision")
+
+ message = HumanMessage(
+     content=[
+         {"type": "text", "text": "What's in this image?"},
+         {"type": "image_url", "image_url": "https://picsum.photos/seed/picsum/200/300"},
+     ]
+ )
+
+ response = llm.invoke([message])
+ print(response.content)
+ ```
+
+ ✅ `image_url` can be:
+
+ * A public image URL
+ * A Google Cloud Storage path (`gcs://...`)
+ * A base64-encoded image (e.g., `data:image/png;base64,...`), as in the sketch below
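+
+ For example, a locally stored image can be passed as a base64 data URL (a minimal sketch reusing `llm` and `HumanMessage` from the snippet above; the file path is illustrative):
+
+ ```python
+ import base64
+
+ # Read a local file and wrap it in a data URL (path is illustrative)
+ with open("my_image.png", "rb") as f:
+     image_b64 = base64.b64encode(f.read()).decode("utf-8")
+
+ message = HumanMessage(
+     content=[
+         {"type": "text", "text": "Describe this image."},
+         {"type": "image_url", "image_url": f"data:image/png;base64,{image_b64}"},
+     ]
+ )
+ response = llm.invoke([message])
+ ```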
+
+ ---
+
+ ### Multimodal Outputs
+
+ The Gemini 2.0 Flash Experimental model supports both text and inline image outputs.
+
+ ```python
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="models/gemini-2.0-flash-exp-image-generation")
+
+ response = llm.invoke(
+     "Generate an image of a cat and say meow",
+     generation_config=dict(response_modalities=["TEXT", "IMAGE"]),
+ )
+
+ image_base64 = response.content[0].get("image_url").get("url").split(",")[-1]
+ meow_text = response.content[1]
+ print(meow_text)
+ ```
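+
+ The image comes back as a base64 data URL, so it can be decoded and saved to disk (a minimal sketch building on `image_base64` above; the output filename is illustrative):
+
+ ```python
+ import base64
+
+ # Decode the base64 payload extracted above and write it out as a PNG
+ with open("cat.png", "wb") as f:
+     f.write(base64.b64decode(image_base64))
+ ```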
+
+ ---
+
+ ### Audio Output
+
+ Text-to-speech preview models can return spoken audio directly:
+
+ ```python
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(model="models/gemini-2.5-flash-preview-tts")
+
+ response = llm.invoke(
+     "Please say The quick brown fox jumps over the lazy dog",
+     generation_config=dict(response_modalities=["AUDIO"]),
+ )
+
+ # Audio bytes returned by the model, written out as a WAV file
+ wav_data = response.additional_kwargs.get("audio")
+ with open("output.wav", "wb") as f:
+     f.write(wav_data)
+ ```
+
+ ---
+
+ ### Multimodal Outputs in Chains
+
+ You can use Gemini models in a LangChain chain:
+
+ ```python
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_google_genai import ChatGoogleGenerativeAI, Modality
+
+ llm = ChatGoogleGenerativeAI(
+     model="models/gemini-2.0-flash-exp-image-generation",
+     response_modalities=[Modality.TEXT, Modality.IMAGE],
+ )
+
+ prompt = ChatPromptTemplate.from_messages([
+     ("human", "Generate an image of {animal} and tell me the sound it makes.")
+ ])
+
+ chain = {"animal": RunnablePassthrough()} | prompt | llm
+ response = chain.invoke("cat")
+ ```
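+
+ The chain response carries the same mixed content layout as the direct call above, so the generated image can be pulled out the same way (a sketch, assuming the image part is a dict with an `image_url` entry as in the earlier example):
+
+ ```python
+ # Find the image part in the mixed text/image content list
+ image_part = next(
+     part for part in response.content
+     if isinstance(part, dict) and "image_url" in part
+ )
+ image_data_url = image_part["image_url"]["url"]  # "data:image/png;base64,..."
+ ```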
+
+ ---
+
+ ### Thinking Support
+
+ Gemini 2.5 Flash Preview supports internal reasoning ("thoughts").
+
+ ```python
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+ llm = ChatGoogleGenerativeAI(
+     model="models/gemini-2.5-flash-preview-04-17",
+     thinking_budget=1024,
+ )
+
+ response = llm.invoke("How many O's are in Google? How did you verify your answer?")
+ reasoning_tokens = response.usage_metadata["output_token_details"]["reasoning"]
+
+ print("Response:", response.content)
+ print("Reasoning tokens used:", reasoning_tokens)
+ ```
+
+ ---
+
+ ## Embeddings
+
+ You can use Gemini embeddings in LangChain:
+
+ ```python
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+ vector = embeddings.embed_query("hello, world!")
+ print(vector)
+ ```
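+
+ For batch indexing, `embed_documents` embeds many texts in one call (a minimal sketch reusing `embeddings` from above; the sample texts are illustrative):
+
+ ```python
+ # Embed several documents at once; returns one vector per input text
+ vectors = embeddings.embed_documents([
+     "LangChain integrates with Gemini.",
+     "Embeddings map text to vectors.",
+ ])
+ print(len(vectors), len(vectors[0]))  # number of docs, embedding dimension
+ ```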
+
+ ---
+
+ ## Semantic Retrieval (RAG)
+
+ Use Gemini with RAG to retrieve relevant documents from your knowledge base.
+
+ ```python
+ from langchain_google_genai import GoogleVectorStore
+ from langchain_text_splitters import CharacterTextSplitter
+ from langchain_community.document_loaders import DirectoryLoader
+
+ # Create a corpus (collection of documents)
+ corpus_store = GoogleVectorStore.create_corpus(display_name="My Corpus")
+
+ # Create a document under that corpus
+ document_store = GoogleVectorStore.create_document(
+     corpus_id=corpus_store.corpus_id, display_name="My Document"
+ )
+
+ # Load, split, and upload documents
+ text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
+ for file in DirectoryLoader(path="data/").load():
+     chunks = text_splitter.split_documents([file])
+     document_store.add_documents(chunks)
+
+ # Query the document corpus
+ aqa = corpus_store.as_aqa()
+ response = aqa.invoke("What is the meaning of life?")
+
+ print("Answer:", response.answer)
+ print("Passages:", response.attributed_passages)
+ print("Answerable probability:", response.answerable_probability)
+ ```
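+
+ Because `GoogleVectorStore` implements the standard LangChain vector store interface, it can also serve as a plain retriever (a sketch; the `k` value is illustrative):
+
+ ```python
+ # Standard VectorStore API: similarity search through the retriever interface
+ retriever = corpus_store.as_retriever(search_kwargs={"k": 4})
+ docs = retriever.invoke("What is the meaning of life?")
+ for doc in docs:
+     print(doc.page_content[:80])
+ ```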
+
+ ---
+
+ ## Resources
+
+ * [LangChain Documentation](https://docs.langchain.com/)
+ * [Google Generative AI SDK](https://googleapis.github.io/python-genai/)
+ * [Gemini Model Documentation](https://ai.google.dev/)
@@ -30,6 +30,7 @@ from langchain_core.utils.function_calling import (
  from langchain_core.utils.json_schema import dereference_refs
  from pydantic import BaseModel
  from pydantic.v1 import BaseModel as BaseModelV1
+ from typing_extensions import NotRequired

  logger = logging.getLogger(__name__)
@@ -65,11 +66,15 @@ _GoogleSearchRetrievalLike = Union[
      gapic.GoogleSearchRetrieval,
      Dict[str, Any],
  ]
+ _GoogleSearchLike = Union[gapic.Tool.GoogleSearch, Dict[str, Any]]
+ _CodeExecutionLike = Union[gapic.CodeExecution, Dict[str, Any]]


  class _ToolDict(TypedDict):
      function_declarations: Sequence[_FunctionDeclarationLike]
      google_search_retrieval: Optional[_GoogleSearchRetrievalLike]
+     google_search: NotRequired[_GoogleSearchLike]
+     code_execution: NotRequired[_CodeExecutionLike]


  # Info: This means one tool=Sequence of FunctionDeclaration
@@ -158,6 +163,8 @@ def convert_to_genai_function_declarations(
      for f in [
          "function_declarations",
          "google_search_retrieval",
+         "google_search",
+         "code_execution",
      ]
  ):
      fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
@@ -184,6 +191,12 @@ def convert_to_genai_function_declarations(
      gapic_tool.google_search_retrieval = gapic.GoogleSearchRetrieval(
          tool["google_search_retrieval"]
      )
+     if "google_search" in tool:
+         gapic_tool.google_search = gapic.Tool.GoogleSearch(
+             tool["google_search"]
+         )
+     if "code_execution" in tool:
+         gapic_tool.code_execution = gapic.CodeExecution(tool["code_execution"])
  else:
      fd = _format_to_gapic_function_declaration(tool)  # type: ignore[arg-type]
      gapic_tool.function_declarations.append(fd)
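With these additions, a tool dict may carry Gemini's built-in `google_search` and `code_execution` tools alongside ordinary function declarations. A minimal sketch of what a caller might pass (hypothetical usage; the empty dicts are illustrative, and this assumes `bind_tools` forwards dict-shaped tools to `convert_to_genai_function_declarations`):

```python
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")

# Ground responses with built-in Google Search
# (assumes dict tools flow through convert_to_genai_function_declarations)
llm_with_search = llm.bind_tools([{"google_search": {}}])

# Let the model run generated code via the built-in sandbox
llm_with_code = llm.bind_tools([{"code_execution": {}}])
```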
@@ -520,3 +533,60 @@ def safe_import(module_name: str, attribute_name: str = "") -> bool:
      return True
  except ImportError:
      return False
+
+
+ def replace_defs_in_schema(original_schema: dict, defs: Optional[dict] = None) -> dict:
+     """Given a JSON schema with a '$defs' section, replace all references to those
+     definitions with the definitions themselves.
+
+     Args:
+         original_schema: Schema generated by `BaseModel.model_json_schema`.
+         defs: Definitions for recursive calls.
+
+     Returns:
+         Schema with refs replaced.
+     """
+
+     new_defs = defs or original_schema.get("$defs")
+
+     if new_defs is None or not isinstance(new_defs, dict):
+         return original_schema.copy()
+
+     resulting_schema = {}
+
+     for key, value in original_schema.items():
+         if key == "$defs":
+             continue
+
+         if not isinstance(value, dict):
+             resulting_schema[key] = value
+         else:
+             if "$ref" in value:
+                 new_value = value.copy()
+
+                 path = new_value.pop("$ref")
+                 def_key = _get_def_key_from_schema_path(path)
+                 new_item = new_defs.get(def_key)
+
+                 assert isinstance(new_item, dict)
+                 new_value.update(new_item)
+
+                 resulting_schema[key] = replace_defs_in_schema(new_value, defs=new_defs)
+             else:
+                 resulting_schema[key] = replace_defs_in_schema(value, defs=new_defs)
+
+     return resulting_schema
+
+
+ def _get_def_key_from_schema_path(schema_path: str) -> str:
+     error_message = f"Malformed schema reference path {schema_path}"
+
+     if not isinstance(schema_path, str) or not schema_path.startswith("#/$defs/"):
+         raise ValueError(error_message)
+
+     # The path must have exactly one level below '#/$defs/'.
+     parts = schema_path.split("/")
+     if len(parts) != 3:
+         raise ValueError(error_message)
+
+     return parts[-1]
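The new `replace_defs_in_schema` helper inlines `$defs` references so the resulting schema is self-contained. A sketch of its effect on a Pydantic v2 schema (the `Person`/`Address` models are illustrative):

```python
from pydantic import BaseModel

from langchain_google_genai._function_utils import replace_defs_in_schema


class Address(BaseModel):
    city: str


class Person(BaseModel):
    name: str
    address: Address


schema = Person.model_json_schema()
# Here schema["properties"]["address"] is just {"$ref": "#/$defs/Address"}
flat = replace_defs_in_schema(schema)
assert "$defs" not in flat
print(flat["properties"]["address"])  # full, inlined Address schema
```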