langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of langchain-core has been flagged as potentially problematic.

Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +52 -65
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +19 -19
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +323 -334
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +441 -507
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +48 -63
  17. langchain_core/document_loaders/base.py +23 -23
  18. langchain_core/document_loaders/langsmith.py +37 -37
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +62 -65
  21. langchain_core/documents/compressor.py +4 -4
  22. langchain_core/documents/transformers.py +28 -29
  23. langchain_core/embeddings/fake.py +50 -54
  24. langchain_core/example_selectors/length_based.py +1 -1
  25. langchain_core/example_selectors/semantic_similarity.py +21 -25
  26. langchain_core/exceptions.py +10 -11
  27. langchain_core/globals.py +3 -151
  28. langchain_core/indexing/api.py +61 -66
  29. langchain_core/indexing/base.py +58 -58
  30. langchain_core/indexing/in_memory.py +3 -3
  31. langchain_core/language_models/__init__.py +14 -27
  32. langchain_core/language_models/_utils.py +270 -84
  33. langchain_core/language_models/base.py +55 -162
  34. langchain_core/language_models/chat_models.py +442 -402
  35. langchain_core/language_models/fake.py +11 -11
  36. langchain_core/language_models/fake_chat_models.py +61 -39
  37. langchain_core/language_models/llms.py +123 -231
  38. langchain_core/load/dump.py +4 -5
  39. langchain_core/load/load.py +18 -28
  40. langchain_core/load/mapping.py +2 -4
  41. langchain_core/load/serializable.py +39 -40
  42. langchain_core/messages/__init__.py +61 -22
  43. langchain_core/messages/ai.py +368 -163
  44. langchain_core/messages/base.py +214 -43
  45. langchain_core/messages/block_translators/__init__.py +111 -0
  46. langchain_core/messages/block_translators/anthropic.py +470 -0
  47. langchain_core/messages/block_translators/bedrock.py +94 -0
  48. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  49. langchain_core/messages/block_translators/google_genai.py +530 -0
  50. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  51. langchain_core/messages/block_translators/groq.py +143 -0
  52. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  53. langchain_core/messages/block_translators/openai.py +1010 -0
  54. langchain_core/messages/chat.py +2 -6
  55. langchain_core/messages/content.py +1423 -0
  56. langchain_core/messages/function.py +6 -10
  57. langchain_core/messages/human.py +41 -38
  58. langchain_core/messages/modifier.py +2 -2
  59. langchain_core/messages/system.py +38 -28
  60. langchain_core/messages/tool.py +96 -103
  61. langchain_core/messages/utils.py +478 -504
  62. langchain_core/output_parsers/__init__.py +1 -14
  63. langchain_core/output_parsers/base.py +58 -61
  64. langchain_core/output_parsers/json.py +7 -8
  65. langchain_core/output_parsers/list.py +5 -7
  66. langchain_core/output_parsers/openai_functions.py +49 -47
  67. langchain_core/output_parsers/openai_tools.py +14 -19
  68. langchain_core/output_parsers/pydantic.py +12 -13
  69. langchain_core/output_parsers/string.py +2 -2
  70. langchain_core/output_parsers/transform.py +15 -17
  71. langchain_core/output_parsers/xml.py +8 -10
  72. langchain_core/outputs/__init__.py +1 -1
  73. langchain_core/outputs/chat_generation.py +18 -18
  74. langchain_core/outputs/chat_result.py +1 -3
  75. langchain_core/outputs/generation.py +8 -8
  76. langchain_core/outputs/llm_result.py +10 -10
  77. langchain_core/prompt_values.py +12 -12
  78. langchain_core/prompts/__init__.py +3 -27
  79. langchain_core/prompts/base.py +45 -55
  80. langchain_core/prompts/chat.py +254 -313
  81. langchain_core/prompts/dict.py +5 -5
  82. langchain_core/prompts/few_shot.py +81 -88
  83. langchain_core/prompts/few_shot_with_templates.py +11 -13
  84. langchain_core/prompts/image.py +12 -14
  85. langchain_core/prompts/loading.py +6 -8
  86. langchain_core/prompts/message.py +3 -3
  87. langchain_core/prompts/prompt.py +24 -39
  88. langchain_core/prompts/string.py +4 -4
  89. langchain_core/prompts/structured.py +42 -50
  90. langchain_core/rate_limiters.py +51 -60
  91. langchain_core/retrievers.py +49 -190
  92. langchain_core/runnables/base.py +1484 -1709
  93. langchain_core/runnables/branch.py +45 -61
  94. langchain_core/runnables/config.py +80 -88
  95. langchain_core/runnables/configurable.py +117 -134
  96. langchain_core/runnables/fallbacks.py +83 -79
  97. langchain_core/runnables/graph.py +85 -95
  98. langchain_core/runnables/graph_ascii.py +27 -28
  99. langchain_core/runnables/graph_mermaid.py +38 -50
  100. langchain_core/runnables/graph_png.py +15 -16
  101. langchain_core/runnables/history.py +135 -148
  102. langchain_core/runnables/passthrough.py +124 -150
  103. langchain_core/runnables/retry.py +46 -51
  104. langchain_core/runnables/router.py +25 -30
  105. langchain_core/runnables/schema.py +79 -74
  106. langchain_core/runnables/utils.py +62 -68
  107. langchain_core/stores.py +81 -115
  108. langchain_core/structured_query.py +8 -8
  109. langchain_core/sys_info.py +27 -29
  110. langchain_core/tools/__init__.py +1 -14
  111. langchain_core/tools/base.py +179 -187
  112. langchain_core/tools/convert.py +131 -139
  113. langchain_core/tools/render.py +10 -10
  114. langchain_core/tools/retriever.py +11 -11
  115. langchain_core/tools/simple.py +19 -24
  116. langchain_core/tools/structured.py +30 -39
  117. langchain_core/tracers/__init__.py +1 -9
  118. langchain_core/tracers/base.py +97 -99
  119. langchain_core/tracers/context.py +29 -52
  120. langchain_core/tracers/core.py +50 -60
  121. langchain_core/tracers/evaluation.py +11 -11
  122. langchain_core/tracers/event_stream.py +115 -70
  123. langchain_core/tracers/langchain.py +21 -21
  124. langchain_core/tracers/log_stream.py +43 -43
  125. langchain_core/tracers/memory_stream.py +3 -3
  126. langchain_core/tracers/root_listeners.py +16 -16
  127. langchain_core/tracers/run_collector.py +2 -4
  128. langchain_core/tracers/schemas.py +0 -129
  129. langchain_core/tracers/stdout.py +3 -3
  130. langchain_core/utils/__init__.py +1 -4
  131. langchain_core/utils/_merge.py +46 -8
  132. langchain_core/utils/aiter.py +57 -61
  133. langchain_core/utils/env.py +9 -9
  134. langchain_core/utils/function_calling.py +89 -191
  135. langchain_core/utils/html.py +7 -8
  136. langchain_core/utils/input.py +6 -6
  137. langchain_core/utils/interactive_env.py +1 -1
  138. langchain_core/utils/iter.py +37 -42
  139. langchain_core/utils/json.py +4 -3
  140. langchain_core/utils/json_schema.py +8 -8
  141. langchain_core/utils/mustache.py +9 -11
  142. langchain_core/utils/pydantic.py +33 -35
  143. langchain_core/utils/strings.py +5 -5
  144. langchain_core/utils/usage.py +1 -1
  145. langchain_core/utils/utils.py +80 -54
  146. langchain_core/vectorstores/base.py +129 -164
  147. langchain_core/vectorstores/in_memory.py +99 -174
  148. langchain_core/vectorstores/utils.py +5 -5
  149. langchain_core/version.py +1 -1
  150. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
  151. langchain_core-1.0.0.dist-info/RECORD +172 -0
  152. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  153. langchain_core/beta/__init__.py +0 -1
  154. langchain_core/beta/runnables/__init__.py +0 -1
  155. langchain_core/beta/runnables/context.py +0 -447
  156. langchain_core/memory.py +0 -120
  157. langchain_core/messages/content_blocks.py +0 -176
  158. langchain_core/prompts/pipeline.py +0 -138
  159. langchain_core/pydantic_v1/__init__.py +0 -30
  160. langchain_core/pydantic_v1/dataclasses.py +0 -23
  161. langchain_core/pydantic_v1/main.py +0 -23
  162. langchain_core/tracers/langchain_v1.py +0 -31
  163. langchain_core/utils/loading.py +0 -35
  164. langchain_core-0.3.79.dist-info/RECORD +0 -174
  165. langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
langchain_core/language_models/fake.py

@@ -3,7 +3,7 @@
 import asyncio
 import time
 from collections.abc import AsyncIterator, Iterator, Mapping
-from typing import Any, Optional
+from typing import Any

 from typing_extensions import override

@@ -23,7 +23,7 @@ class FakeListLLM(LLM):
     """List of responses to return in order."""
     # This parameter should be removed from FakeListLLM since
     # it's only used by sub-classes.
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses.

     Ignored by FakeListLLM, but used by sub-classes.
@@ -44,8 +44,8 @@ class FakeListLLM(LLM):
     def _call(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -60,8 +60,8 @@ class FakeListLLM(LLM):
     async def _acall(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -91,16 +91,16 @@ class FakeStreamingListLLM(FakeListLLM):
     chunks in a streaming implementation.
     """

-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, will raise an exception on the specified chunk number."""

     @override
     def stream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> Iterator[str]:
         result = self.invoke(input, config)
@@ -119,9 +119,9 @@ class FakeStreamingListLLM(FakeListLLM):
     async def astream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[str]:
         result = await self.ainvoke(input, config)
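Every change in fake.py is the same mechanical migration: typing.Optional[X] becomes the PEP 604 union X | None. Because these annotations sit on pydantic fields and are evaluated at runtime, this is consistent with langchain-core 1.0 targeting Python 3.10 or newer, where the union operator on types is available. The two spellings describe the same type; a minimal sketch (the function names here are hypothetical, for illustration only):

from typing import Optional

def sleep_old(sleep: Optional[float] = None) -> None:  # 0.3.x spelling
    ...

def sleep_new(sleep: float | None = None) -> None:  # 1.0 spelling
    ...

# On Python 3.10+ both annotations normalize to the same union:
assert Optional[float] == (float | None)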
langchain_core/language_models/fake_chat_models.py

@@ -1,10 +1,10 @@
-"""Fake ChatModel for testing purposes."""
+"""Fake chat model for testing purposes."""

 import asyncio
 import re
 import time
 from collections.abc import AsyncIterator, Iterator
-from typing import Any, Optional, Union, cast
+from typing import Any, Literal, cast

 from typing_extensions import override

@@ -19,11 +19,11 @@ from langchain_core.runnables import RunnableConfig


 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ``ChatModel`` for testing purposes."""
+    """Fake chat model for testing purposes."""

     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses."""
     i: int = 0
     """Internally incremented after every model invocation."""
@@ -32,8 +32,8 @@ class FakeMessagesListChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         if self.sleep is not None:
@@ -57,14 +57,14 @@ class FakeListChatModelError(Exception):


 class FakeListChatModel(SimpleChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake chat model for testing purposes."""

     responses: list[str]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     i: int = 0
     """Internally incremented after every model invocation."""
-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, raise an error on the specified chunk number during streaming."""

     @property
@@ -95,8 +95,8 @@ class FakeListChatModel(SimpleChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Union[list[str], None] = None,
-        run_manager: Union[CallbackManagerForLLMRun, None] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         response = self.responses[self.i]
@@ -113,14 +113,19 @@ class FakeListChatModel(SimpleChatModel):
             ):
                 raise FakeListChatModelError

-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Literal["last"] | None = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )

     @override
     async def _astream(
         self,
         messages: list[BaseMessage],
-        stop: Union[list[str], None] = None,
-        run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         response = self.responses[self.i]
@@ -136,7 +141,12 @@ class FakeListChatModel(SimpleChatModel):
                 and i_c == self.error_on_chunk_number
             ):
                 raise FakeListChatModelError
-            yield ChatGenerationChunk(message=AIMessageChunk(content=c))
+            chunk_position: Literal["last"] | None = (
+                "last" if i_c == len(response) - 1 else None
+            )
+            yield ChatGenerationChunk(
+                message=AIMessageChunk(content=c, chunk_position=chunk_position)
+            )

     @property
     @override
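The behavioral change in both streaming paths above is that the final chunk of each response is now tagged with chunk_position="last", using the newly imported Literal; chunk_position is a field AIMessageChunk gains in 1.0, as the constructor call in this diff shows. A minimal consumer sketch, assuming the class is imported from the module shown in this diff:

from langchain_core.language_models.fake_chat_models import FakeListChatModel

model = FakeListChatModel(responses=["abc"])
for chunk in model.stream("any input"):
    # FakeListChatModel streams one character per chunk; only the final
    # chunk ("c" here) carries chunk_position="last", so downstream code
    # can detect end-of-message without waiting for the iterator to close.
    if getattr(chunk, "chunk_position", None) == "last":
        print("final chunk:", chunk.content)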
@@ -148,27 +158,33 @@ class FakeListChatModel(SimpleChatModel):
     def batch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
-            return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                self.invoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         return [self.invoke(m, config, **kwargs) for m in inputs]

     @override
     async def abatch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> list[BaseMessage]:
+    ) -> list[AIMessage]:
         if isinstance(config, list):
             # do Not use an async iterator here because need explicit ordering
-            return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                await self.ainvoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         # do Not use an async iterator here because need explicit ordering
         return [await self.ainvoke(m, config, **kwargs) for m in inputs]

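Besides the union-syntax and return-type updates, the comprehensions above now pass strict=False to zip explicitly. zip has accepted a strict keyword since Python 3.10: strict=False (the default) silently stops at the shorter iterable, while strict=True raises ValueError on a length mismatch, so spelling it out documents that a short config list is tolerated rather than treated as an error:

inputs = ["a", "b", "c"]
configs = [{"tags": ["x"]}, {"tags": ["y"]}]  # one entry shorter than inputs

# strict=False truncates to the shorter iterable -> [("a", ...), ("b", ...)]
pairs = list(zip(inputs, configs, strict=False))

# strict=True would raise instead:
# list(zip(inputs, configs, strict=True))  # ValueError: lengths differ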
@@ -180,8 +196,8 @@ class FakeChatModel(SimpleChatModel):
     def _call(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         return "fake response"
@@ -190,8 +206,8 @@ class FakeChatModel(SimpleChatModel):
     async def _agenerate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         output_str = "fake response"
@@ -212,23 +228,23 @@ class GenericFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.

     * Chat model should be usable in both sync and async tests
-    * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
-      tokens.
+    * Invokes `on_llm_new_token` to allow for testing of callback related code for new
+        tokens.
     * Includes logic to break messages into message chunk to facilitate testing of
-      streaming.
+        streaming.

     """

-    messages: Iterator[Union[AIMessage, str]]
+    messages: Iterator[AIMessage | str]
     """Get an iterator over messages.

     This can be expanded to accept other types like Callables / dicts / strings
     to make the interface more generic if needed.

-    .. note::
-        if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+    !!! note
+        if you want to pass a list, you can use `iter` to convert it to an iterator.

-    .. warning::
+    !!! warning
         Streaming is not implemented yet. We should try to implement it in the future by
         delegating to invoke and then breaking the resulting output into message chunks.

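The docstring hunks above also switch directive syntax from reStructuredText (`.. note::`) to the `!!! note` admonition form used by MkDocs. Per that note, messages must already be an iterator, so a list needs to be wrapped with iter. A minimal usage sketch based on the note, importing from the module this diff covers:

from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

# Wrap the list with iter(), as the docstring instructs.
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello")]))
response = model.invoke("hi")
print(response.content)  # -> "hello"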
@@ -238,8 +254,8 @@ class GenericFakeChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         message = next(self.messages)
@@ -250,8 +266,8 @@ class GenericFakeChatModel(BaseChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         chat_result = self._generate(
@@ -284,10 +300,16 @@ class GenericFakeChatModel(BaseChatModel):

         content_chunks = cast("list[str]", re.split(r"(\s)", content))

-        for token in content_chunks:
+        for idx, token in enumerate(content_chunks):
             chunk = ChatGenerationChunk(
                 message=AIMessageChunk(content=token, id=message.id)
             )
+            if (
+                idx == len(content_chunks) - 1
+                and isinstance(chunk.message, AIMessageChunk)
+                and not message.additional_kwargs
+            ):
+                chunk.message.chunk_position = "last"
             if run_manager:
                 run_manager.on_llm_new_token(token, chunk=chunk)
             yield chunk
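Two details worth noting in the hunk above: re.split with a capturing group keeps the whitespace separators as standalone tokens, so the fake stream emits them as their own chunks; and the final chunk is only tagged when the source message has no additional_kwargs, presumably because messages carrying extra payloads are chunked by a code path not shown in this hunk. The split behavior in isolation:

import re

content = "hello world"
tokens = re.split(r"(\s)", content)
print(tokens)  # ['hello', ' ', 'world'] -- the captured space survives as a token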
@@ -360,8 +382,8 @@ class ParrotFakeChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         return ChatResult(generations=[ChatGeneration(message=messages[-1])])
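ParrotFakeChatModel is otherwise unchanged: as the untouched return statement shows, it simply echoes the last input message back as its generation, and only the signature moved to the union syntax. A usage sketch:

from langchain_core.language_models.fake_chat_models import ParrotFakeChatModel
from langchain_core.messages import HumanMessage

model = ParrotFakeChatModel()
echoed = model.invoke([HumanMessage(content="polly")])
print(echoed.content)  # -> "polly" (the last input message, parroted back)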