langchain-core 1.0.0a5__py3-none-any.whl → 1.0.0a7__py3-none-any.whl

This diff compares the publicly released contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.

This version of langchain-core has been flagged as potentially problematic.

Files changed (132):
  1. langchain_core/_api/__init__.py +3 -3
  2. langchain_core/_api/beta_decorator.py +6 -6
  3. langchain_core/_api/deprecation.py +21 -29
  4. langchain_core/_api/path.py +3 -6
  5. langchain_core/_import_utils.py +2 -3
  6. langchain_core/agents.py +10 -11
  7. langchain_core/caches.py +7 -7
  8. langchain_core/callbacks/base.py +91 -91
  9. langchain_core/callbacks/file.py +11 -11
  10. langchain_core/callbacks/manager.py +86 -89
  11. langchain_core/callbacks/stdout.py +8 -8
  12. langchain_core/callbacks/usage.py +4 -4
  13. langchain_core/chat_history.py +1 -37
  14. langchain_core/document_loaders/base.py +2 -2
  15. langchain_core/document_loaders/langsmith.py +15 -15
  16. langchain_core/documents/base.py +16 -16
  17. langchain_core/documents/compressor.py +4 -4
  18. langchain_core/example_selectors/length_based.py +1 -1
  19. langchain_core/example_selectors/semantic_similarity.py +17 -19
  20. langchain_core/exceptions.py +3 -3
  21. langchain_core/globals.py +3 -151
  22. langchain_core/indexing/api.py +44 -43
  23. langchain_core/indexing/base.py +30 -30
  24. langchain_core/indexing/in_memory.py +3 -3
  25. langchain_core/language_models/_utils.py +5 -7
  26. langchain_core/language_models/base.py +18 -132
  27. langchain_core/language_models/chat_models.py +118 -227
  28. langchain_core/language_models/fake.py +11 -11
  29. langchain_core/language_models/fake_chat_models.py +35 -29
  30. langchain_core/language_models/llms.py +91 -201
  31. langchain_core/load/dump.py +1 -1
  32. langchain_core/load/load.py +11 -12
  33. langchain_core/load/mapping.py +2 -4
  34. langchain_core/load/serializable.py +2 -4
  35. langchain_core/messages/ai.py +17 -20
  36. langchain_core/messages/base.py +28 -26
  37. langchain_core/messages/block_translators/__init__.py +17 -7
  38. langchain_core/messages/block_translators/anthropic.py +3 -3
  39. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  40. langchain_core/messages/block_translators/google_genai.py +502 -20
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +6 -6
  43. langchain_core/messages/content.py +120 -124
  44. langchain_core/messages/human.py +7 -7
  45. langchain_core/messages/system.py +7 -7
  46. langchain_core/messages/tool.py +24 -24
  47. langchain_core/messages/utils.py +67 -79
  48. langchain_core/output_parsers/base.py +12 -14
  49. langchain_core/output_parsers/json.py +4 -4
  50. langchain_core/output_parsers/list.py +3 -5
  51. langchain_core/output_parsers/openai_functions.py +3 -3
  52. langchain_core/output_parsers/openai_tools.py +3 -3
  53. langchain_core/output_parsers/pydantic.py +2 -2
  54. langchain_core/output_parsers/transform.py +13 -15
  55. langchain_core/output_parsers/xml.py +7 -9
  56. langchain_core/outputs/chat_generation.py +4 -4
  57. langchain_core/outputs/chat_result.py +1 -3
  58. langchain_core/outputs/generation.py +2 -2
  59. langchain_core/outputs/llm_result.py +5 -5
  60. langchain_core/prompts/__init__.py +1 -5
  61. langchain_core/prompts/base.py +10 -15
  62. langchain_core/prompts/chat.py +31 -82
  63. langchain_core/prompts/dict.py +2 -2
  64. langchain_core/prompts/few_shot.py +5 -5
  65. langchain_core/prompts/few_shot_with_templates.py +4 -4
  66. langchain_core/prompts/loading.py +3 -5
  67. langchain_core/prompts/prompt.py +4 -16
  68. langchain_core/prompts/string.py +2 -1
  69. langchain_core/prompts/structured.py +16 -23
  70. langchain_core/rate_limiters.py +3 -4
  71. langchain_core/retrievers.py +14 -14
  72. langchain_core/runnables/base.py +938 -1054
  73. langchain_core/runnables/branch.py +36 -40
  74. langchain_core/runnables/config.py +27 -35
  75. langchain_core/runnables/configurable.py +108 -124
  76. langchain_core/runnables/fallbacks.py +76 -72
  77. langchain_core/runnables/graph.py +39 -45
  78. langchain_core/runnables/graph_ascii.py +9 -11
  79. langchain_core/runnables/graph_mermaid.py +18 -19
  80. langchain_core/runnables/graph_png.py +8 -9
  81. langchain_core/runnables/history.py +114 -127
  82. langchain_core/runnables/passthrough.py +113 -139
  83. langchain_core/runnables/retry.py +43 -48
  84. langchain_core/runnables/router.py +23 -28
  85. langchain_core/runnables/schema.py +42 -44
  86. langchain_core/runnables/utils.py +28 -31
  87. langchain_core/stores.py +9 -13
  88. langchain_core/structured_query.py +8 -8
  89. langchain_core/tools/base.py +63 -115
  90. langchain_core/tools/convert.py +31 -35
  91. langchain_core/tools/render.py +1 -1
  92. langchain_core/tools/retriever.py +4 -4
  93. langchain_core/tools/simple.py +13 -17
  94. langchain_core/tools/structured.py +12 -15
  95. langchain_core/tracers/base.py +62 -64
  96. langchain_core/tracers/context.py +17 -35
  97. langchain_core/tracers/core.py +49 -53
  98. langchain_core/tracers/evaluation.py +11 -11
  99. langchain_core/tracers/event_stream.py +58 -60
  100. langchain_core/tracers/langchain.py +13 -13
  101. langchain_core/tracers/log_stream.py +22 -24
  102. langchain_core/tracers/root_listeners.py +14 -14
  103. langchain_core/tracers/run_collector.py +2 -4
  104. langchain_core/tracers/schemas.py +8 -8
  105. langchain_core/tracers/stdout.py +2 -1
  106. langchain_core/utils/__init__.py +0 -3
  107. langchain_core/utils/_merge.py +2 -2
  108. langchain_core/utils/aiter.py +24 -28
  109. langchain_core/utils/env.py +4 -4
  110. langchain_core/utils/function_calling.py +31 -41
  111. langchain_core/utils/html.py +3 -4
  112. langchain_core/utils/input.py +3 -3
  113. langchain_core/utils/iter.py +15 -19
  114. langchain_core/utils/json.py +3 -2
  115. langchain_core/utils/json_schema.py +6 -6
  116. langchain_core/utils/mustache.py +3 -5
  117. langchain_core/utils/pydantic.py +16 -18
  118. langchain_core/utils/usage.py +1 -1
  119. langchain_core/utils/utils.py +29 -29
  120. langchain_core/vectorstores/base.py +18 -21
  121. langchain_core/vectorstores/in_memory.py +14 -87
  122. langchain_core/vectorstores/utils.py +2 -2
  123. langchain_core/version.py +1 -1
  124. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/METADATA +10 -31
  125. langchain_core-1.0.0a7.dist-info/RECORD +176 -0
  126. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.0a7.dist-info}/WHEEL +1 -1
  127. langchain_core/messages/block_translators/ollama.py +0 -47
  128. langchain_core/prompts/pipeline.py +0 -138
  129. langchain_core/tracers/langchain_v1.py +0 -31
  130. langchain_core/utils/loading.py +0 -35
  131. langchain_core-1.0.0a5.dist-info/RECORD +0 -181
  132. langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
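
Only two of the 132 changed files are reproduced below: langchain_core/language_models/fake.py (+11 -11) and langchain_core/language_models/fake_chat_models.py (+35 -29). Nearly every hunk applies the same mechanical migration: Optional[X] and Union[X, Y] annotations are rewritten as PEP 604 unions (X | None, X | Y), shrinking the typing imports. A minimal before/after sketch of the pattern (the function names here are illustrative, not taken from the diff):

# Before: typing.Optional / typing.Union spellings.
from typing import Optional, Union

def call_before(stop: Optional[list[str]] = None, config: Union[dict, None] = None) -> str:
    return "ok"

# After: PEP 604 unions; `X | None` in annotations evaluates at runtime
# on Python 3.10+, which langchain-core 1.x is assumed here to target.
def call_after(stop: list[str] | None = None, config: dict | None = None) -> str:
    return "ok"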
langchain_core/language_models/fake.py

@@ -3,7 +3,7 @@
  import asyncio
  import time
  from collections.abc import AsyncIterator, Iterator, Mapping
- from typing import Any, Optional
+ from typing import Any

  from typing_extensions import override

@@ -23,7 +23,7 @@ class FakeListLLM(LLM):
      """List of responses to return in order."""
      # This parameter should be removed from FakeListLLM since
      # it's only used by sub-classes.
-     sleep: Optional[float] = None
+     sleep: float | None = None
      """Sleep time in seconds between responses.

      Ignored by FakeListLLM, but used by sub-classes.
@@ -44,8 +44,8 @@ class FakeListLLM(LLM):
      def _call(
          self,
          prompt: str,
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> str:
          """Return next response."""
@@ -60,8 +60,8 @@ class FakeListLLM(LLM):
      async def _acall(
          self,
          prompt: str,
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> str:
          """Return next response."""
@@ -91,16 +91,16 @@ class FakeStreamingListLLM(FakeListLLM):
      chunks in a streaming implementation.
      """

-     error_on_chunk_number: Optional[int] = None
+     error_on_chunk_number: int | None = None
      """If set, will raise an exception on the specified chunk number."""

      @override
      def stream(
          self,
          input: LanguageModelInput,
-         config: Optional[RunnableConfig] = None,
+         config: RunnableConfig | None = None,
          *,
-         stop: Optional[list[str]] = None,
+         stop: list[str] | None = None,
          **kwargs: Any,
      ) -> Iterator[str]:
          result = self.invoke(input, config)
@@ -119,9 +119,9 @@ class FakeStreamingListLLM(FakeListLLM):
      async def astream(
          self,
          input: LanguageModelInput,
-         config: Optional[RunnableConfig] = None,
+         config: RunnableConfig | None = None,
          *,
-         stop: Optional[list[str]] = None,
+         stop: list[str] | None = None,
          **kwargs: Any,
      ) -> AsyncIterator[str]:
          result = await self.ainvoke(input, config)
langchain_core/language_models/fake_chat_models.py

@@ -4,7 +4,7 @@ import asyncio
  import re
  import time
  from collections.abc import AsyncIterator, Iterator
- from typing import Any, Literal, Optional, Union, cast
+ from typing import Any, Literal, cast

  from typing_extensions import override

@@ -23,7 +23,7 @@ class FakeMessagesListChatModel(BaseChatModel):

      responses: list[BaseMessage]
      """List of responses to **cycle** through in order."""
-     sleep: Optional[float] = None
+     sleep: float | None = None
      """Sleep time in seconds between responses."""
      i: int = 0
      """Internally incremented after every model invocation."""
@@ -32,8 +32,8 @@ class FakeMessagesListChatModel(BaseChatModel):
      def _generate(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          if self.sleep is not None:
@@ -61,10 +61,10 @@ class FakeListChatModel(SimpleChatModel):

      responses: list[str]
      """List of responses to **cycle** through in order."""
-     sleep: Optional[float] = None
+     sleep: float | None = None
      i: int = 0
      """Internally incremented after every model invocation."""
-     error_on_chunk_number: Optional[int] = None
+     error_on_chunk_number: int | None = None
      """If set, raise an error on the specified chunk number during streaming."""

      @property
@@ -95,8 +95,8 @@ class FakeListChatModel(SimpleChatModel):
      def _stream(
          self,
          messages: list[BaseMessage],
-         stop: Union[list[str], None] = None,
-         run_manager: Union[CallbackManagerForLLMRun, None] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> Iterator[ChatGenerationChunk]:
          response = self.responses[self.i]
@@ -113,7 +113,7 @@ class FakeListChatModel(SimpleChatModel):
              ):
                  raise FakeListChatModelError

-             chunk_position: Optional[Literal["last"]] = (
+             chunk_position: Literal["last"] | None = (
                  "last" if i_c == len(response) - 1 else None
              )
              yield ChatGenerationChunk(
@@ -124,8 +124,8 @@ class FakeListChatModel(SimpleChatModel):
      async def _astream(
          self,
          messages: list[BaseMessage],
-         stop: Union[list[str], None] = None,
-         run_manager: Union[AsyncCallbackManagerForLLMRun, None] = None,
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> AsyncIterator[ChatGenerationChunk]:
          response = self.responses[self.i]
@@ -141,7 +141,7 @@ class FakeListChatModel(SimpleChatModel):
              and i_c == self.error_on_chunk_number
          ):
              raise FakeListChatModelError
-         chunk_position: Optional[Literal["last"]] = (
+         chunk_position: Literal["last"] | None = (
              "last" if i_c == len(response) - 1 else None
          )
          yield ChatGenerationChunk(
@@ -158,27 +158,33 @@
      def batch(
          self,
          inputs: list[Any],
-         config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+         config: RunnableConfig | list[RunnableConfig] | None = None,
          *,
          return_exceptions: bool = False,
          **kwargs: Any,
      ) -> list[AIMessage]:
          if isinstance(config, list):
-             return [self.invoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+             return [
+                 self.invoke(m, c, **kwargs)
+                 for m, c in zip(inputs, config, strict=False)
+             ]
          return [self.invoke(m, config, **kwargs) for m in inputs]

      @override
      async def abatch(
          self,
          inputs: list[Any],
-         config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
+         config: RunnableConfig | list[RunnableConfig] | None = None,
          *,
          return_exceptions: bool = False,
          **kwargs: Any,
      ) -> list[AIMessage]:
          if isinstance(config, list):
              # do Not use an async iterator here because need explicit ordering
-             return [await self.ainvoke(m, c, **kwargs) for m, c in zip(inputs, config)]
+             return [
+                 await self.ainvoke(m, c, **kwargs)
+                 for m, c in zip(inputs, config, strict=False)
+             ]
          # do Not use an async iterator here because need explicit ordering
          return [await self.ainvoke(m, config, **kwargs) for m in inputs]

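Besides the union rewrite, the batch/abatch hunk above adds an explicit strict=False to zip. Since Python 3.10, zip takes a strict keyword: strict=False (the default) silently stops at the shorter iterable, while strict=True raises ValueError on a length mismatch. Spelling out the default documents that inputs and config are allowed to differ in length, and it satisfies linters that flag bare zip calls (e.g. Ruff's B905 rule). A quick illustration:

# strict=False (the default) truncates to the shorter input...
pairs = list(zip([1, 2, 3], ["a", "b"], strict=False))
print(pairs)  # [(1, 'a'), (2, 'b')]

# ...while strict=True turns a length mismatch into an error.
try:
    list(zip([1, 2, 3], ["a", "b"], strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1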
@@ -190,8 +196,8 @@ class FakeChatModel(SimpleChatModel):
      def _call(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> str:
          return "fake response"
@@ -200,8 +206,8 @@ class FakeChatModel(SimpleChatModel):
      async def _agenerate(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          output_str = "fake response"
@@ -229,16 +235,16 @@ class GenericFakeChatModel(BaseChatModel):

      """

-     messages: Iterator[Union[AIMessage, str]]
+     messages: Iterator[AIMessage | str]
      """Get an iterator over messages.

      This can be expanded to accept other types like Callables / dicts / strings
      to make the interface more generic if needed.

-     .. note::
+     !!! note
          if you want to pass a list, you can use ``iter`` to convert it to an iterator.

-     .. warning::
+     !!! warning
          Streaming is not implemented yet. We should try to implement it in the future by
          delegating to invoke and then breaking the resulting output into message chunks.

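The docstring hunk above also swaps Sphinx/reStructuredText admonitions (.. note::, .. warning::) for the !!! note / !!! warning syntax used by MkDocs with the Material theme, which suggests the documentation toolchain moved away from Sphinx. The two styles side by side, in an illustrative docstring not taken from the diff:

def documented() -> None:
    """Do something.

    .. note::
        Sphinx/reST style: a directive line with content indented beneath it.

    !!! note
        MkDocs-Material style: same intent, Markdown-flavored syntax.
    """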
@@ -248,8 +254,8 @@ class GenericFakeChatModel(BaseChatModel):
      def _generate(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          message = next(self.messages)
@@ -260,8 +266,8 @@ class GenericFakeChatModel(BaseChatModel):
      def _stream(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> Iterator[ChatGenerationChunk]:
          chat_result = self._generate(
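
For context on the two GenericFakeChatModel hunks above: the model draws one item from its messages iterator per invocation, and its docstring notes that a plain list must be wrapped with iter first. A minimal usage sketch under that reading (the message list and printed results are illustrative):

# Hypothetical usage: iter() turns the list into the required iterator;
# each invoke() consumes the next item (AIMessage or plain string).
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage

model = GenericFakeChatModel(messages=iter([AIMessage(content="hi"), "bye"]))
print(model.invoke("first prompt"))   # expected: an AIMessage with content "hi"
print(model.invoke("second prompt"))  # expected: an AIMessage with content "bye"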
@@ -376,8 +382,8 @@ class ParrotFakeChatModel(BaseChatModel):
      def _generate(
          self,
          messages: list[BaseMessage],
-         stop: Optional[list[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          return ChatResult(generations=[ChatGeneration(message=messages[-1])])