langchain-core 1.0.0a6__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +51 -64
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +55 -48
  8. langchain_core/caches.py +65 -66
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +321 -336
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +454 -514
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +53 -68
  17. langchain_core/document_loaders/base.py +27 -25
  18. langchain_core/document_loaders/blob_loaders.py +1 -1
  19. langchain_core/document_loaders/langsmith.py +44 -48
  20. langchain_core/documents/__init__.py +23 -3
  21. langchain_core/documents/base.py +102 -94
  22. langchain_core/documents/compressor.py +10 -10
  23. langchain_core/documents/transformers.py +34 -35
  24. langchain_core/embeddings/fake.py +50 -54
  25. langchain_core/example_selectors/length_based.py +2 -2
  26. langchain_core/example_selectors/semantic_similarity.py +28 -32
  27. langchain_core/exceptions.py +21 -20
  28. langchain_core/globals.py +3 -151
  29. langchain_core/indexing/__init__.py +1 -1
  30. langchain_core/indexing/api.py +121 -126
  31. langchain_core/indexing/base.py +73 -75
  32. langchain_core/indexing/in_memory.py +4 -6
  33. langchain_core/language_models/__init__.py +14 -29
  34. langchain_core/language_models/_utils.py +58 -61
  35. langchain_core/language_models/base.py +82 -172
  36. langchain_core/language_models/chat_models.py +329 -402
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +42 -36
  39. langchain_core/language_models/llms.py +189 -269
  40. langchain_core/load/dump.py +9 -12
  41. langchain_core/load/load.py +18 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +42 -40
  44. langchain_core/messages/__init__.py +10 -16
  45. langchain_core/messages/ai.py +148 -148
  46. langchain_core/messages/base.py +53 -51
  47. langchain_core/messages/block_translators/__init__.py +19 -22
  48. langchain_core/messages/block_translators/anthropic.py +6 -6
  49. langchain_core/messages/block_translators/bedrock_converse.py +5 -5
  50. langchain_core/messages/block_translators/google_genai.py +10 -7
  51. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  52. langchain_core/messages/block_translators/groq.py +117 -21
  53. langchain_core/messages/block_translators/langchain_v0.py +5 -5
  54. langchain_core/messages/block_translators/openai.py +11 -11
  55. langchain_core/messages/chat.py +2 -6
  56. langchain_core/messages/content.py +339 -330
  57. langchain_core/messages/function.py +6 -10
  58. langchain_core/messages/human.py +24 -31
  59. langchain_core/messages/modifier.py +2 -2
  60. langchain_core/messages/system.py +19 -29
  61. langchain_core/messages/tool.py +74 -90
  62. langchain_core/messages/utils.py +484 -510
  63. langchain_core/output_parsers/__init__.py +13 -10
  64. langchain_core/output_parsers/base.py +61 -61
  65. langchain_core/output_parsers/format_instructions.py +9 -4
  66. langchain_core/output_parsers/json.py +12 -10
  67. langchain_core/output_parsers/list.py +21 -23
  68. langchain_core/output_parsers/openai_functions.py +49 -47
  69. langchain_core/output_parsers/openai_tools.py +30 -23
  70. langchain_core/output_parsers/pydantic.py +13 -14
  71. langchain_core/output_parsers/string.py +5 -5
  72. langchain_core/output_parsers/transform.py +15 -17
  73. langchain_core/output_parsers/xml.py +35 -34
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +18 -18
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +16 -16
  78. langchain_core/outputs/llm_result.py +10 -10
  79. langchain_core/prompt_values.py +13 -19
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +81 -86
  82. langchain_core/prompts/chat.py +308 -351
  83. langchain_core/prompts/dict.py +6 -6
  84. langchain_core/prompts/few_shot.py +81 -88
  85. langchain_core/prompts/few_shot_with_templates.py +11 -13
  86. langchain_core/prompts/image.py +12 -14
  87. langchain_core/prompts/loading.py +4 -6
  88. langchain_core/prompts/message.py +7 -7
  89. langchain_core/prompts/prompt.py +24 -39
  90. langchain_core/prompts/string.py +26 -10
  91. langchain_core/prompts/structured.py +49 -53
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +61 -198
  94. langchain_core/runnables/base.py +1551 -1656
  95. langchain_core/runnables/branch.py +68 -70
  96. langchain_core/runnables/config.py +72 -89
  97. langchain_core/runnables/configurable.py +145 -161
  98. langchain_core/runnables/fallbacks.py +102 -96
  99. langchain_core/runnables/graph.py +91 -97
  100. langchain_core/runnables/graph_ascii.py +27 -28
  101. langchain_core/runnables/graph_mermaid.py +42 -51
  102. langchain_core/runnables/graph_png.py +43 -16
  103. langchain_core/runnables/history.py +175 -177
  104. langchain_core/runnables/passthrough.py +151 -167
  105. langchain_core/runnables/retry.py +46 -51
  106. langchain_core/runnables/router.py +30 -35
  107. langchain_core/runnables/schema.py +75 -80
  108. langchain_core/runnables/utils.py +60 -67
  109. langchain_core/stores.py +85 -121
  110. langchain_core/structured_query.py +8 -8
  111. langchain_core/sys_info.py +29 -29
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +306 -245
  114. langchain_core/tools/convert.py +160 -155
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -11
  117. langchain_core/tools/simple.py +19 -24
  118. langchain_core/tools/structured.py +32 -39
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/base.py +97 -99
  121. langchain_core/tracers/context.py +29 -52
  122. langchain_core/tracers/core.py +49 -53
  123. langchain_core/tracers/evaluation.py +11 -11
  124. langchain_core/tracers/event_stream.py +65 -64
  125. langchain_core/tracers/langchain.py +21 -21
  126. langchain_core/tracers/log_stream.py +45 -45
  127. langchain_core/tracers/memory_stream.py +3 -3
  128. langchain_core/tracers/root_listeners.py +16 -16
  129. langchain_core/tracers/run_collector.py +2 -4
  130. langchain_core/tracers/schemas.py +0 -129
  131. langchain_core/tracers/stdout.py +3 -3
  132. langchain_core/utils/__init__.py +1 -4
  133. langchain_core/utils/_merge.py +2 -2
  134. langchain_core/utils/aiter.py +57 -61
  135. langchain_core/utils/env.py +9 -9
  136. langchain_core/utils/function_calling.py +94 -188
  137. langchain_core/utils/html.py +7 -8
  138. langchain_core/utils/input.py +9 -6
  139. langchain_core/utils/interactive_env.py +1 -1
  140. langchain_core/utils/iter.py +36 -40
  141. langchain_core/utils/json.py +4 -3
  142. langchain_core/utils/json_schema.py +9 -9
  143. langchain_core/utils/mustache.py +8 -10
  144. langchain_core/utils/pydantic.py +35 -37
  145. langchain_core/utils/strings.py +6 -9
  146. langchain_core/utils/usage.py +1 -1
  147. langchain_core/utils/utils.py +66 -62
  148. langchain_core/vectorstores/base.py +182 -216
  149. langchain_core/vectorstores/in_memory.py +101 -176
  150. langchain_core/vectorstores/utils.py +5 -5
  151. langchain_core/version.py +1 -1
  152. langchain_core-1.0.4.dist-info/METADATA +69 -0
  153. langchain_core-1.0.4.dist-info/RECORD +172 -0
  154. {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.4.dist-info}/WHEEL +1 -1
  155. langchain_core/memory.py +0 -120
  156. langchain_core/messages/block_translators/ollama.py +0 -47
  157. langchain_core/prompts/pipeline.py +0 -138
  158. langchain_core/pydantic_v1/__init__.py +0 -30
  159. langchain_core/pydantic_v1/dataclasses.py +0 -23
  160. langchain_core/pydantic_v1/main.py +0 -23
  161. langchain_core/tracers/langchain_v1.py +0 -31
  162. langchain_core/utils/loading.py +0 -35
  163. langchain_core-1.0.0a6.dist-info/METADATA +0 -67
  164. langchain_core-1.0.0a6.dist-info/RECORD +0 -181
  165. langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
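The hunks below are excerpted from two of the files listed above: langchain_core/language_models/fake.py (entry 37, +11 -11) and langchain_core/language_models/fake_chat_models.py (entry 38, +42 -36). The dominant pattern across the release is a migration of type hints from typing.Optional and typing.Union to PEP 604 union syntax (X | None), which Python accepts at runtime from 3.10 onward. A minimal before/after sketch of the pattern (the functions below are hypothetical, not code from the package):

    # Before: unions spelled via typing.Optional / typing.Union
    from typing import Optional, Union

    def reply(prompt: str, stop: Optional[list[str]] = None) -> Union[str, None]:
        return prompt if stop is None else None

    # After: PEP 604 unions; no typing imports required (Python 3.10+)
    def reply_v2(prompt: str, stop: list[str] | None = None) -> str | None:
        return prompt if stop is None else None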
langchain_core/language_models/fake.py

@@ -3,7 +3,7 @@
 import asyncio
 import time
 from collections.abc import AsyncIterator, Iterator, Mapping
-from typing import Any, Optional
+from typing import Any
 
 from typing_extensions import override
 
@@ -23,7 +23,7 @@ class FakeListLLM(LLM):
     """List of responses to return in order."""
     # This parameter should be removed from FakeListLLM since
     # it's only used by sub-classes.
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses.
 
     Ignored by FakeListLLM, but used by sub-classes.
@@ -44,8 +44,8 @@ class FakeListLLM(LLM):
     def _call(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -60,8 +60,8 @@ class FakeListLLM(LLM):
     async def _acall(
         self,
         prompt: str,
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         """Return next response."""
@@ -91,16 +91,16 @@ class FakeStreamingListLLM(FakeListLLM):
     chunks in a streaming implementation.
     """
 
-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, will raise an exception on the specified chunk number."""
 
     @override
     def stream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> Iterator[str]:
         result = self.invoke(input, config)
@@ -119,9 +119,9 @@ class FakeStreamingListLLM(FakeListLLM):
     async def astream(
         self,
         input: LanguageModelInput,
-        config: Optional[RunnableConfig] = None,
+        config: RunnableConfig | None = None,
         *,
-        stop: Optional[list[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
     ) -> AsyncIterator[str]:
         result = await self.ainvoke(input, config)
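That is the whole of the fake.py change: signatures rewritten to the new union syntax with behavior untouched. For orientation, a short usage sketch of FakeListLLM (import path follows the file layout above; per its docstring, responses are returned in order, one per invocation):

    from langchain_core.language_models.fake import FakeListLLM

    llm = FakeListLLM(responses=["first", "second"])
    print(llm.invoke("any prompt"))  # "first"
    print(llm.invoke("any prompt"))  # "second"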
langchain_core/language_models/fake_chat_models.py

@@ -1,10 +1,10 @@
-"""Fake ChatModel for testing purposes."""
+"""Fake chat models for testing purposes."""
 
 import asyncio
 import re
 import time
 from collections.abc import AsyncIterator, Iterator
-from typing import Any, Literal, Optional, Union, cast
+from typing import Any, Literal, cast
 
 from typing_extensions import override
 
@@ -19,11 +19,11 @@ from langchain_core.runnables import RunnableConfig
 
 
 class FakeMessagesListChatModel(BaseChatModel):
-    """Fake ``ChatModel`` for testing purposes."""
+    """Fake chat model for testing purposes."""
 
     responses: list[BaseMessage]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     """Sleep time in seconds between responses."""
     i: int = 0
     """Internally incremented after every model invocation."""
@@ -32,8 +32,8 @@ class FakeMessagesListChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         if self.sleep is not None:
@@ -57,14 +57,14 @@ class FakeListChatModelError(Exception):
 
 
 class FakeListChatModel(SimpleChatModel):
-    """Fake ChatModel for testing purposes."""
+    """Fake chat model for testing purposes."""
 
     responses: list[str]
     """List of responses to **cycle** through in order."""
-    sleep: Optional[float] = None
+    sleep: float | None = None
     i: int = 0
     """Internally incremented after every model invocation."""
-    error_on_chunk_number: Optional[int] = None
+    error_on_chunk_number: int | None = None
     """If set, raise an error on the specified chunk number during streaming."""
 
     @property
@@ -95,8 +95,8 @@ class FakeListChatModel(SimpleChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Union[list[str], None] = None,
-        run_manager: Union[CallbackManagerForLLMRun, None] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         response = self.responses[self.i]
@@ -113,7 +113,7 @@ class FakeListChatModel(SimpleChatModel):
             ):
                 raise FakeListChatModelError
 
-            chunk_position: Optional[Literal["last"]] = (
+            chunk_position: Literal["last"] | None = (
                 "last" if i_c == len(response) - 1 else None
             )
             yield ChatGenerationChunk(
@@ -124,8 +124,8 @@ class FakeListChatModel(SimpleChatModel):
     async def _astream(
         self,
         messages: list[BaseMessage],
-        stop: Union[list[str], None] = None,
-        run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
        **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         response = self.responses[self.i]
@@ -141,7 +141,7 @@ class FakeListChatModel(SimpleChatModel):
                 and i_c == self.error_on_chunk_number
             ):
                 raise FakeListChatModelError
-            chunk_position: Optional[Literal["last"]] = (
+            chunk_position: Literal["last"] | None = (
                 "last" if i_c == len(response) - 1 else None
             )
             yield ChatGenerationChunk(
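The two hunks above show that FakeListChatModel streams a queued response string character by character, tagging the final chunk with chunk_position="last". A hedged sketch of how that surfaces to a caller (the per-character chunk contents are inferred from the loop above, not stated elsewhere in this diff):

    from langchain_core.language_models.fake_chat_models import FakeListChatModel

    chat = FakeListChatModel(responses=["abc"])
    for chunk in chat.stream("any prompt"):
        # Each chunk should carry one character of the queued response
        print(chunk.content, end="|")  # prints: a|b|c|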
@@ -158,27 +158,33 @@ class FakeListChatModel(SimpleChatModel):
     def batch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
     ) -> list[AIMessage]:
         if isinstance(config, list):
-            return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                self.invoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         return [self.invoke(m, config, **kwargs) for m in inputs]
 
     @override
     async def abatch(
         self,
         inputs: list[Any],
-        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+        config: RunnableConfig | list[RunnableConfig] | None = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
     ) -> list[AIMessage]:
         if isinstance(config, list):
             # do Not use an async iterator here because need explicit ordering
-            return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+            return [
+                await self.ainvoke(m, c, **kwargs)
+                for m, c in zip(inputs, config, strict=False)
+            ]
         # do Not use an async iterator here because need explicit ordering
         return [await self.ainvoke(m, config, **kwargs) for m in inputs]
 
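Beyond the union rewrite, the batch and abatch hunks make zip's truncation behavior explicit. Python 3.10 added the strict flag to zip, and linters such as Ruff (rule B905) ask for it to be spelled out: strict=False keeps the old silent-truncation semantics, while strict=True raises when the iterables differ in length. A standalone illustration (not package code):

    pairs = list(zip([1, 2, 3], ["a", "b"], strict=False))
    print(pairs)  # [(1, 'a'), (2, 'b')] -- truncates to the shorter input

    try:
        list(zip([1, 2, 3], ["a", "b"], strict=True))
    except ValueError as err:
        print(err)  # zip() argument 2 is shorter than argument 1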
@@ -190,8 +196,8 @@ class FakeChatModel(SimpleChatModel):
     def _call(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> str:
         return "fake response"
@@ -200,8 +206,8 @@ class FakeChatModel(SimpleChatModel):
     async def _agenerate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         output_str = "fake response"
@@ -222,23 +228,23 @@ class GenericFakeChatModel(BaseChatModel):
     """Generic fake chat model that can be used to test the chat model interface.
 
     * Chat model should be usable in both sync and async tests
-    * Invokes ``on_llm_new_token`` to allow for testing of callback related code for new
-        tokens.
+    * Invokes `on_llm_new_token` to allow for testing of callback related code for new
+      tokens.
     * Includes logic to break messages into message chunk to facilitate testing of
-        streaming.
+      streaming.
 
     """
 
-    messages: Iterator[Union[AIMessage, str]]
+    messages: Iterator[AIMessage | str]
     """Get an iterator over messages.
 
     This can be expanded to accept other types like Callables / dicts / strings
     to make the interface more generic if needed.
 
-    .. note::
-        if you want to pass a list, you can use ``iter`` to convert it to an iterator.
+    !!! note
+        if you want to pass a list, you can use `iter` to convert it to an iterator.
 
-    .. warning::
+    !!! warning
         Streaming is not implemented yet. We should try to implement it in the future by
         delegating to invoke and then breaking the resulting output into message chunks.
 
@@ -248,8 +254,8 @@ class GenericFakeChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         message = next(self.messages)
@@ -260,8 +266,8 @@ class GenericFakeChatModel(BaseChatModel):
     def _stream(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         chat_result = self._generate(
@@ -376,8 +382,8 @@ class ParrotFakeChatModel(BaseChatModel):
     def _generate(
         self,
         messages: list[BaseMessage],
-        stop: Optional[list[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         return ChatResult(generations=[ChatGeneration(message=messages[-1])])